author     Dimitry Andric <dim@FreeBSD.org>  2015-06-21 13:59:01 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2015-06-21 13:59:01 +0000
commit     3a0822f094b578157263e04114075ad7df81db41 (patch)
tree       bc48361fe2cd1ca5f93ac01b38b183774468fc79 /lib
parent     85d8b2bbe386bcfe669575d05b61482d7be07e5d (diff)
download   src-3a0822f094b578157263e04114075ad7df81db41.tar.gz
           src-3a0822f094b578157263e04114075ad7df81db41.zip
Vendor import of llvm trunk r240225 (tag: vendor/llvm/llvm-trunk-r240225)
Notes:
    svn path=/vendor/llvm/dist/; revision=284677
    svn path=/vendor/llvm/llvm-trunk-r240225/; revision=284678; tag=vendor/llvm/llvm-trunk-r240225
Diffstat (limited to 'lib')
-rw-r--r--lib/Analysis/AliasAnalysis.cpp80
-rw-r--r--lib/Analysis/AliasAnalysisCounter.cpp15
-rw-r--r--lib/Analysis/AliasAnalysisEvaluator.cpp128
-rw-r--r--lib/Analysis/AliasDebugger.cpp10
-rw-r--r--lib/Analysis/AliasSetTracker.cpp46
-rw-r--r--lib/Analysis/BasicAliasAnalysis.cpp155
-rw-r--r--lib/Analysis/BlockFrequencyInfoImpl.cpp81
-rw-r--r--lib/Analysis/CFGPrinter.cpp8
-rw-r--r--lib/Analysis/CFLAliasAnalysis.cpp68
-rw-r--r--lib/Analysis/CaptureTracking.cpp2
-rw-r--r--lib/Analysis/DivergenceAnalysis.cpp2
-rw-r--r--lib/Analysis/DomPrinter.cpp2
-rw-r--r--lib/Analysis/IPA/CallGraph.cpp40
-rw-r--r--lib/Analysis/IPA/CallGraphSCCPass.cpp6
-rw-r--r--lib/Analysis/IPA/CallPrinter.cpp2
-rw-r--r--lib/Analysis/IPA/GlobalsModRef.cpp15
-rw-r--r--lib/Analysis/InstCount.cpp2
-rw-r--r--lib/Analysis/InstructionSimplify.cpp19
-rw-r--r--lib/Analysis/LazyValueInfo.cpp40
-rw-r--r--lib/Analysis/LibCallAliasAnalysis.cpp4
-rw-r--r--lib/Analysis/LibCallSemantics.cpp5
-rw-r--r--lib/Analysis/Lint.cpp61
-rw-r--r--lib/Analysis/LoopAccessAnalysis.cpp12
-rw-r--r--lib/Analysis/LoopPass.cpp2
-rw-r--r--lib/Analysis/MemDepPrinter.cpp2
-rw-r--r--lib/Analysis/MemDerefPrinter.cpp2
-rw-r--r--lib/Analysis/MemoryDependenceAnalysis.cpp72
-rw-r--r--lib/Analysis/MemoryLocation.cpp84
-rw-r--r--lib/Analysis/ModuleDebugInfoPrinter.cpp2
-rw-r--r--lib/Analysis/NoAliasAnalysis.cpp17
-rw-r--r--lib/Analysis/PHITransAddr.cpp15
-rw-r--r--lib/Analysis/RegionPrinter.cpp2
-rw-r--r--lib/Analysis/ScalarEvolution.cpp18
-rw-r--r--lib/Analysis/ScalarEvolutionAliasAnalysis.cpp19
-rw-r--r--lib/Analysis/ScalarEvolutionExpander.cpp6
-rw-r--r--lib/Analysis/ScopedNoAliasAA.cpp16
-rw-r--r--lib/Analysis/StratifiedSets.h2
-rw-r--r--lib/Analysis/TypeBasedAliasAnalysis.cpp18
-rw-r--r--lib/Analysis/ValueTracking.cpp306
-rw-r--r--lib/AsmParser/CMakeLists.txt3
-rw-r--r--lib/AsmParser/LLLexer.cpp1
-rw-r--r--lib/AsmParser/LLParser.cpp21
-rw-r--r--lib/AsmParser/LLParser.h2
-rw-r--r--lib/AsmParser/LLToken.h1
-rw-r--r--lib/Bitcode/Reader/BitReader.cpp8
-rw-r--r--lib/Bitcode/Reader/BitcodeReader.cpp1373
-rw-r--r--lib/Bitcode/Writer/BitcodeWriter.cpp12
-rw-r--r--lib/Bitcode/Writer/BitcodeWriterPass.cpp2
-rw-r--r--lib/Bitcode/Writer/CMakeLists.txt3
-rw-r--r--lib/Bitcode/Writer/ValueEnumerator.cpp50
-rw-r--r--lib/Bitcode/Writer/ValueEnumerator.h2
-rw-r--r--lib/CMakeLists.txt1
-rw-r--r--lib/CodeGen/AggressiveAntiDepBreaker.h2
-rw-r--r--lib/CodeGen/AntiDepBreaker.h2
-rw-r--r--lib/CodeGen/AsmPrinter/AddressPool.h2
-rw-r--r--lib/CodeGen/AsmPrinter/AsmPrinter.cpp84
-rw-r--r--lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp32
-rw-r--r--lib/CodeGen/AsmPrinter/ByteStreamer.h2
-rw-r--r--lib/CodeGen/AsmPrinter/DIE.cpp6
-rw-r--r--lib/CodeGen/AsmPrinter/DIEHash.h2
-rw-r--r--lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h2
-rw-r--r--lib/CodeGen/AsmPrinter/DebugLocEntry.h2
-rw-r--r--lib/CodeGen/AsmPrinter/DebugLocStream.h2
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfAccelTable.h2
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp2
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfCompileUnit.h2
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfDebug.cpp6
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfExpression.cpp37
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfExpression.h5
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfFile.cpp2
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfFile.h2
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfStringPool.h2
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfUnit.cpp8
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfUnit.h2
-rw-r--r--lib/CodeGen/AsmPrinter/EHStreamer.h2
-rw-r--r--lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp2
-rw-r--r--lib/CodeGen/AsmPrinter/WinException.cpp109
-rw-r--r--lib/CodeGen/AsmPrinter/WinException.h9
-rw-r--r--lib/CodeGen/AtomicExpandPass.cpp4
-rw-r--r--lib/CodeGen/BranchFolding.cpp21
-rw-r--r--lib/CodeGen/BranchFolding.h2
-rw-r--r--lib/CodeGen/CMakeLists.txt3
-rw-r--r--lib/CodeGen/CallingConvLower.cpp28
-rw-r--r--lib/CodeGen/CodeGen.cpp1
-rw-r--r--lib/CodeGen/CodeGenPrepare.cpp51
-rw-r--r--lib/CodeGen/CoreCLRGC.cpp2
-rw-r--r--lib/CodeGen/CriticalAntiDepBreaker.h2
-rw-r--r--lib/CodeGen/DFAPacketizer.cpp2
-rw-r--r--lib/CodeGen/DeadMachineInstructionElim.cpp2
-rw-r--r--lib/CodeGen/DwarfEHPrepare.cpp17
-rw-r--r--lib/CodeGen/EarlyIfConversion.cpp17
-rw-r--r--lib/CodeGen/EdgeBundles.cpp2
-rw-r--r--lib/CodeGen/ExecutionDepsFix.cpp4
-rw-r--r--lib/CodeGen/FaultMaps.cpp114
-rw-r--r--lib/CodeGen/GCMetadata.cpp2
-rw-r--r--lib/CodeGen/GCRootLowering.cpp2
-rw-r--r--lib/CodeGen/IfConversion.cpp2
-rw-r--r--lib/CodeGen/ImplicitNullChecks.cpp261
-rw-r--r--lib/CodeGen/InlineSpiller.cpp4
-rw-r--r--lib/CodeGen/LLVMBuild.txt2
-rw-r--r--lib/CodeGen/LLVMTargetMachine.cpp50
-rw-r--r--lib/CodeGen/LiveVariables.cpp39
-rw-r--r--lib/CodeGen/MIRParser/MIRParser.cpp175
-rw-r--r--lib/CodeGen/MIRPrinter.cpp96
-rw-r--r--lib/CodeGen/MIRPrinter.h33
-rw-r--r--lib/CodeGen/MIRPrintingPass.cpp44
-rw-r--r--lib/CodeGen/MachineBlockPlacement.cpp6
-rw-r--r--lib/CodeGen/MachineCombiner.cpp34
-rw-r--r--lib/CodeGen/MachineCopyPropagation.cpp2
-rw-r--r--lib/CodeGen/MachineFunction.cpp103
-rw-r--r--lib/CodeGen/MachineFunctionAnalysis.cpp8
-rw-r--r--lib/CodeGen/MachineFunctionPrinterPass.cpp2
-rw-r--r--lib/CodeGen/MachineInstr.cpp6
-rw-r--r--lib/CodeGen/MachineLICM.cpp21
-rw-r--r--lib/CodeGen/MachineModuleInfo.cpp19
-rw-r--r--lib/CodeGen/MachineSSAUpdater.cpp2
-rw-r--r--lib/CodeGen/MachineScheduler.cpp12
-rw-r--r--lib/CodeGen/MachineSink.cpp125
-rw-r--r--lib/CodeGen/MachineTraceMetrics.cpp10
-rw-r--r--lib/CodeGen/MachineVerifier.cpp4
-rw-r--r--lib/CodeGen/OptimizePHIs.cpp2
-rw-r--r--lib/CodeGen/PHIElimination.cpp25
-rw-r--r--lib/CodeGen/Passes.cpp11
-rw-r--r--lib/CodeGen/PeepholeOptimizer.cpp2
-rw-r--r--lib/CodeGen/PostRASchedulerList.cpp4
-rw-r--r--lib/CodeGen/RegAllocFast.cpp2
-rw-r--r--lib/CodeGen/RegisterCoalescer.cpp5
-rw-r--r--lib/CodeGen/RegisterCoalescer.h2
-rw-r--r--lib/CodeGen/RegisterScavenging.cpp10
-rw-r--r--lib/CodeGen/ScheduleDAGInstrs.cpp12
-rw-r--r--lib/CodeGen/ScheduleDAGPrinter.cpp2
-rw-r--r--lib/CodeGen/SelectionDAG/DAGCombiner.cpp198
-rw-r--r--lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp27
-rw-r--r--lib/CodeGen/SelectionDAG/InstrEmitter.h2
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeDAG.cpp2
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeTypes.cpp2
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp2
-rw-r--r--lib/CodeGen/SelectionDAG/SDNodeDbgValue.h2
-rw-r--r--lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h2
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAG.cpp95
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp156
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h5
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp10
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp2
-rw-r--r--lib/CodeGen/SelectionDAG/StatepointLowering.cpp187
-rw-r--r--lib/CodeGen/SelectionDAG/TargetLowering.cpp5
-rw-r--r--lib/CodeGen/ShadowStackGCLowering.cpp14
-rw-r--r--lib/CodeGen/SjLjEHPrepare.cpp2
-rw-r--r--lib/CodeGen/Spiller.h2
-rw-r--r--lib/CodeGen/SplitKit.h2
-rw-r--r--lib/CodeGen/StatepointExampleGC.cpp2
-rw-r--r--lib/CodeGen/TailDuplication.cpp2
-rw-r--r--lib/CodeGen/TargetInstrInfo.cpp9
-rw-r--r--lib/CodeGen/UnreachableBlockElim.cpp2
-rw-r--r--lib/CodeGen/VirtRegMap.cpp81
-rw-r--r--lib/CodeGen/WinEHPrepare.cpp43
-rw-r--r--lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp2
-rw-r--r--lib/DebugInfo/DWARF/DWARFContext.cpp9
-rw-r--r--lib/DebugInfo/DWARF/DWARFFormValue.cpp2
-rw-r--r--lib/DebugInfo/DWARF/SyntaxHighlighting.h6
-rw-r--r--lib/DebugInfo/PDB/CMakeLists.txt2
-rw-r--r--lib/DebugInfo/PDB/PDBSymbolFunc.cpp2
-rw-r--r--lib/DebugInfo/PDB/PDBSymbolTypeFunctionSig.cpp2
-rw-r--r--lib/ExecutionEngine/CMakeLists.txt3
-rw-r--r--lib/ExecutionEngine/ExecutionEngine.cpp10
-rw-r--r--lib/ExecutionEngine/Interpreter/Execution.cpp3
-rw-r--r--lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp49
-rw-r--r--lib/ExecutionEngine/Interpreter/Interpreter.cpp14
-rw-r--r--lib/ExecutionEngine/Interpreter/Interpreter.h8
-rw-r--r--lib/ExecutionEngine/MCJIT/CMakeLists.txt3
-rw-r--r--lib/ExecutionEngine/MCJIT/MCJIT.cpp28
-rw-r--r--lib/ExecutionEngine/MCJIT/MCJIT.h18
-rw-r--r--lib/ExecutionEngine/Orc/CMakeLists.txt3
-rw-r--r--lib/ExecutionEngine/Orc/IndirectionUtils.cpp2
-rw-r--r--lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp2
-rw-r--r--lib/ExecutionEngine/Orc/OrcMCJITReplacement.h2
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt3
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp20
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp2
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp2
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h2
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp25
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h4
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp2
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h2
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h2
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h2
-rw-r--r--lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h2
-rw-r--r--lib/IR/AsmWriter.cpp75
-rw-r--r--lib/IR/AttributeImpl.h16
-rw-r--r--lib/IR/Attributes.cpp3
-rw-r--r--lib/IR/BasicBlock.cpp5
-rw-r--r--lib/IR/ConstantFold.cpp8
-rw-r--r--lib/IR/ConstantFold.h2
-rw-r--r--lib/IR/Constants.cpp5
-rw-r--r--lib/IR/Core.cpp7
-rw-r--r--lib/IR/DIBuilder.cpp5
-rw-r--r--lib/IR/DiagnosticInfo.cpp12
-rw-r--r--lib/IR/DiagnosticPrinter.cpp10
-rw-r--r--lib/IR/Function.cpp48
-rw-r--r--lib/IR/GCOV.cpp2
-rw-r--r--lib/IR/Globals.cpp10
-rw-r--r--lib/IR/IRBuilder.cpp8
-rw-r--r--lib/IR/IRPrintingPasses.cpp2
-rw-r--r--lib/IR/Instruction.cpp6
-rw-r--r--lib/IR/Instructions.cpp181
-rw-r--r--lib/IR/LLVMContext.cpp35
-rw-r--r--lib/IR/LLVMContextImpl.cpp4
-rw-r--r--lib/IR/LLVMContextImpl.h2
-rw-r--r--lib/IR/LegacyPassManager.cpp10
-rw-r--r--lib/IR/Metadata.cpp25
-rw-r--r--lib/IR/Operator.cpp2
-rw-r--r--lib/IR/Pass.cpp2
-rw-r--r--lib/IR/SymbolTableListTraitsImpl.h2
-rw-r--r--lib/IR/TypeFinder.cpp3
-rw-r--r--lib/IR/Use.cpp2
-rw-r--r--lib/IR/User.cpp84
-rw-r--r--lib/IR/Value.cpp3
-rw-r--r--lib/IR/Verifier.cpp31
-rw-r--r--lib/IRReader/CMakeLists.txt3
-rw-r--r--lib/IRReader/IRReader.cpp9
-rw-r--r--lib/LLVMBuild.txt25
-rw-r--r--lib/LTO/LLVMBuild.txt17
-rw-r--r--lib/LTO/LTOModule.cpp26
-rw-r--r--lib/LibDriver/CMakeLists.txt8
-rw-r--r--lib/LibDriver/LLVMBuild.txt (renamed from lib/Target/R600/MCTargetDesc/LLVMBuild.txt)9
-rw-r--r--lib/LibDriver/LibDriver.cpp157
-rw-r--r--lib/LibDriver/Makefile20
-rw-r--r--lib/LibDriver/Options.td23
-rw-r--r--lib/Linker/CMakeLists.txt3
-rw-r--r--lib/Linker/LinkModules.cpp45
-rw-r--r--lib/MC/ELFObjectWriter.cpp3
-rw-r--r--lib/MC/MCAsmStreamer.cpp5
-rw-r--r--lib/MC/MCAssembler.cpp142
-rw-r--r--lib/MC/MCContext.cpp30
-rw-r--r--lib/MC/MCDisassembler/MCExternalSymbolizer.cpp2
-rw-r--r--lib/MC/MCDisassembler/MCRelocationInfo.cpp3
-rw-r--r--lib/MC/MCDwarf.cpp21
-rw-r--r--lib/MC/MCELFStreamer.cpp2
-rw-r--r--lib/MC/MCNullStreamer.cpp2
-rw-r--r--lib/MC/MCObjectFileInfo.cpp17
-rw-r--r--lib/MC/MCObjectStreamer.cpp15
-rw-r--r--lib/MC/MCParser/AsmParser.cpp10
-rw-r--r--lib/MC/MCParser/CMakeLists.txt2
-rw-r--r--lib/MC/MCParser/COFFAsmParser.cpp2
-rw-r--r--lib/MC/MCParser/ELFAsmParser.cpp2
-rw-r--r--lib/MC/MCStreamer.cpp25
-rw-r--r--lib/MC/MCSubtargetInfo.cpp17
-rw-r--r--lib/MC/MCSymbol.cpp19
-rw-r--r--lib/MC/MCSymbolELF.cpp18
-rw-r--r--lib/MC/MCWin64EH.cpp2
-rw-r--r--lib/MC/MCWinEH.cpp4
-rw-r--r--lib/MC/WinCOFFObjectWriter.cpp11
-rw-r--r--lib/MC/WinCOFFStreamer.cpp12
-rw-r--r--lib/Makefile2
-rw-r--r--lib/Object/ArchiveWriter.cpp16
-rw-r--r--lib/Object/CMakeLists.txt3
-rw-r--r--lib/Object/COFFObjectFile.cpp13
-rw-r--r--lib/Object/COFFYAML.cpp6
-rw-r--r--lib/Object/ELFYAML.cpp6
-rw-r--r--lib/Object/IRObjectFile.cpp19
-rw-r--r--lib/Object/MachOObjectFile.cpp7
-rw-r--r--lib/Object/RecordStreamer.h2
-rw-r--r--lib/Option/OptTable.cpp4
-rw-r--r--lib/ProfileData/CMakeLists.txt3
-rw-r--r--lib/ProfileData/CoverageMapping.cpp9
-rw-r--r--lib/ProfileData/CoverageMappingReader.cpp2
-rw-r--r--lib/ProfileData/CoverageMappingWriter.cpp2
-rw-r--r--lib/ProfileData/InstrProf.cpp2
-rw-r--r--lib/ProfileData/InstrProfIndexed.h2
-rw-r--r--lib/ProfileData/InstrProfWriter.cpp2
-rw-r--r--lib/ProfileData/SampleProf.cpp2
-rw-r--r--lib/Support/APFloat.cpp4
-rw-r--r--lib/Support/APInt.cpp2
-rw-r--r--lib/Support/ARMBuildAttrs.cpp6
-rw-r--r--lib/Support/ARMWinEH.cpp6
-rw-r--r--lib/Support/Allocator.cpp2
-rw-r--r--lib/Support/CMakeLists.txt1
-rw-r--r--lib/Support/CommandLine.cpp42
-rw-r--r--lib/Support/CrashRecoveryContext.cpp2
-rw-r--r--lib/Support/DAGDeltaAlgorithm.cpp2
-rw-r--r--lib/Support/DataStream.cpp14
-rw-r--r--lib/Support/Debug.cpp2
-rw-r--r--lib/Support/FileOutputBuffer.cpp2
-rw-r--r--lib/Support/Locale.cpp2
-rw-r--r--lib/Support/MD5.cpp2
-rw-r--r--lib/Support/MathExtras.cpp2
-rw-r--r--lib/Support/MemoryBuffer.cpp4
-rw-r--r--lib/Support/Mutex.cpp2
-rw-r--r--lib/Support/RWMutex.cpp2
-rw-r--r--lib/Support/SourceMgr.cpp46
-rw-r--r--lib/Support/Statistic.cpp2
-rw-r--r--lib/Support/StreamingMemoryObject.cpp11
-rw-r--r--lib/Support/StringSaver.cpp19
-rw-r--r--lib/Support/TargetParser.cpp6
-rw-r--r--lib/Support/TimeValue.cpp2
-rw-r--r--lib/Support/Timer.cpp4
-rw-r--r--lib/Support/Triple.cpp7
-rw-r--r--lib/Support/Unix/Process.inc2
-rw-r--r--lib/Support/Unix/Program.inc9
-rw-r--r--lib/Support/Unix/ThreadLocal.inc2
-rw-r--r--lib/Support/Unix/TimeValue.inc2
-rw-r--r--lib/Support/Unix/Watchdog.inc4
-rw-r--r--lib/Support/Windows/Memory.inc10
-rw-r--r--lib/Support/Windows/Program.inc3
-rw-r--r--lib/Support/YAMLParser.cpp8
-rw-r--r--lib/TableGen/TGLexer.h2
-rw-r--r--lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp2
-rw-r--r--lib/Target/AArch64/AArch64AsmPrinter.cpp2
-rw-r--r--lib/Target/AArch64/AArch64BranchRelaxation.cpp2
-rw-r--r--lib/Target/AArch64/AArch64CallingConvention.h2
-rw-r--r--lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp2
-rw-r--r--lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp2
-rw-r--r--lib/Target/AArch64/AArch64FastISel.cpp6
-rw-r--r--lib/Target/AArch64/AArch64FrameLowering.h2
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.cpp259
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.h7
-rw-r--r--lib/Target/AArch64/AArch64InstrFormats.td77
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.cpp56
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.h41
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.td29
-rw-r--r--lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp5
-rw-r--r--lib/Target/AArch64/AArch64MCInstLower.h2
-rw-r--r--lib/Target/AArch64/AArch64MachineFunctionInfo.h2
-rw-r--r--lib/Target/AArch64/AArch64PBQPRegAlloc.cpp2
-rw-r--r--lib/Target/AArch64/AArch64PBQPRegAlloc.h2
-rw-r--r--lib/Target/AArch64/AArch64SelectionDAGInfo.h2
-rw-r--r--lib/Target/AArch64/AArch64StorePairSuppress.cpp4
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.cpp8
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.h7
-rw-r--r--lib/Target/AArch64/AArch64TargetMachine.cpp38
-rw-r--r--lib/Target/AArch64/AArch64TargetMachine.h6
-rw-r--r--lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp2
-rw-r--r--lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp2
-rw-r--r--lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h2
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp20
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp2
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp4
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp4
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h10
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp4
-rw-r--r--lib/Target/AArch64/Utils/AArch64BaseInfo.h22
-rw-r--r--lib/Target/AMDGPU/AMDGPU.h (renamed from lib/Target/R600/AMDGPU.h)0
-rw-r--r--lib/Target/AMDGPU/AMDGPU.td (renamed from lib/Target/R600/AMDGPU.td)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp (renamed from lib/Target/R600/AMDGPUAlwaysInlinePass.cpp)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp (renamed from lib/Target/R600/AMDGPUAsmPrinter.cpp)8
-rw-r--r--lib/Target/AMDGPU/AMDGPUAsmPrinter.h (renamed from lib/Target/R600/AMDGPUAsmPrinter.h)2
-rw-r--r--lib/Target/AMDGPU/AMDGPUCallingConv.td (renamed from lib/Target/R600/AMDGPUCallingConv.td)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUFrameLowering.cpp (renamed from lib/Target/R600/AMDGPUFrameLowering.cpp)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUFrameLowering.h (renamed from lib/Target/R600/AMDGPUFrameLowering.h)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp (renamed from lib/Target/R600/AMDGPUISelDAGToDAG.cpp)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelLowering.cpp (renamed from lib/Target/R600/AMDGPUISelLowering.cpp)2
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelLowering.h (renamed from lib/Target/R600/AMDGPUISelLowering.h)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUInstrInfo.cpp (renamed from lib/Target/R600/AMDGPUInstrInfo.cpp)7
-rw-r--r--lib/Target/AMDGPU/AMDGPUInstrInfo.h (renamed from lib/Target/R600/AMDGPUInstrInfo.h)6
-rw-r--r--lib/Target/AMDGPU/AMDGPUInstrInfo.td (renamed from lib/Target/R600/AMDGPUInstrInfo.td)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUInstructions.td (renamed from lib/Target/R600/AMDGPUInstructions.td)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp (renamed from lib/Target/R600/AMDGPUIntrinsicInfo.cpp)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h (renamed from lib/Target/R600/AMDGPUIntrinsicInfo.h)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUIntrinsics.td (renamed from lib/Target/R600/AMDGPUIntrinsics.td)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUMCInstLower.cpp (renamed from lib/Target/R600/AMDGPUMCInstLower.cpp)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUMCInstLower.h (renamed from lib/Target/R600/AMDGPUMCInstLower.h)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUMachineFunction.cpp (renamed from lib/Target/R600/AMDGPUMachineFunction.cpp)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUMachineFunction.h (renamed from lib/Target/R600/AMDGPUMachineFunction.h)2
-rw-r--r--lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp (renamed from lib/Target/R600/AMDGPUPromoteAlloca.cpp)0
-rw-r--r--lib/Target/AMDGPU/AMDGPURegisterInfo.cpp (renamed from lib/Target/R600/AMDGPURegisterInfo.cpp)0
-rw-r--r--lib/Target/AMDGPU/AMDGPURegisterInfo.h (renamed from lib/Target/R600/AMDGPURegisterInfo.h)0
-rw-r--r--lib/Target/AMDGPU/AMDGPURegisterInfo.td (renamed from lib/Target/R600/AMDGPURegisterInfo.td)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUSubtarget.cpp (renamed from lib/Target/R600/AMDGPUSubtarget.cpp)13
-rw-r--r--lib/Target/AMDGPU/AMDGPUSubtarget.h (renamed from lib/Target/R600/AMDGPUSubtarget.h)7
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (renamed from lib/Target/R600/AMDGPUTargetMachine.cpp)28
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetMachine.h (renamed from lib/Target/R600/AMDGPUTargetMachine.h)10
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp (renamed from lib/Target/R600/AMDGPUTargetTransformInfo.cpp)0
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h (renamed from lib/Target/R600/AMDGPUTargetTransformInfo.h)0
-rw-r--r--lib/Target/AMDGPU/AMDILCFGStructurizer.cpp (renamed from lib/Target/R600/AMDILCFGStructurizer.cpp)0
-rw-r--r--lib/Target/AMDGPU/AMDKernelCodeT.h (renamed from lib/Target/R600/AMDKernelCodeT.h)2
-rw-r--r--lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp (renamed from lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp)69
-rw-r--r--lib/Target/AMDGPU/AsmParser/CMakeLists.txt3
-rw-r--r--lib/Target/AMDGPU/AsmParser/LLVMBuild.txt23
-rw-r--r--lib/Target/AMDGPU/AsmParser/Makefile (renamed from lib/Target/R600/AsmParser/Makefile)6
-rw-r--r--lib/Target/AMDGPU/CIInstructions.td149
-rw-r--r--lib/Target/AMDGPU/CMakeLists.txt (renamed from lib/Target/R600/CMakeLists.txt)2
-rw-r--r--lib/Target/AMDGPU/CaymanInstructions.td (renamed from lib/Target/R600/CaymanInstructions.td)0
-rw-r--r--lib/Target/AMDGPU/EvergreenInstructions.td (renamed from lib/Target/R600/EvergreenInstructions.td)0
-rw-r--r--lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp (renamed from lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp)2
-rw-r--r--lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h (renamed from lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h)0
-rw-r--r--lib/Target/AMDGPU/InstPrinter/CMakeLists.txt3
-rw-r--r--lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt (renamed from lib/Target/R600/InstPrinter/LLVMBuild.txt)8
-rw-r--r--lib/Target/AMDGPU/InstPrinter/Makefile (renamed from lib/Target/R600/InstPrinter/Makefile)2
-rw-r--r--lib/Target/AMDGPU/LLVMBuild.txt (renamed from lib/Target/R600/LLVMBuild.txt)10
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp (renamed from lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp)3
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp (renamed from lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp)0
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h (renamed from lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h)4
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp (renamed from lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp)0
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h (renamed from lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h)0
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp (renamed from lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp)0
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h (renamed from lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h)0
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp (renamed from lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp)6
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h (renamed from lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h)5
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt (renamed from lib/Target/R600/MCTargetDesc/CMakeLists.txt)2
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt (renamed from lib/Target/R600/AsmParser/LLVMBuild.txt)10
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/Makefile (renamed from lib/Target/R600/MCTargetDesc/Makefile)2
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp (renamed from lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp)0
-rw-r--r--lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp (renamed from lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp)0
-rw-r--r--lib/Target/AMDGPU/Makefile (renamed from lib/Target/R600/Makefile)2
-rw-r--r--lib/Target/AMDGPU/Processors.td (renamed from lib/Target/R600/Processors.td)0
-rw-r--r--lib/Target/AMDGPU/R600ClauseMergePass.cpp (renamed from lib/Target/R600/R600ClauseMergePass.cpp)0
-rw-r--r--lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp (renamed from lib/Target/R600/R600ControlFlowFinalizer.cpp)0
-rw-r--r--lib/Target/AMDGPU/R600Defines.h (renamed from lib/Target/R600/R600Defines.h)4
-rw-r--r--lib/Target/AMDGPU/R600EmitClauseMarkers.cpp (renamed from lib/Target/R600/R600EmitClauseMarkers.cpp)0
-rw-r--r--lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp (renamed from lib/Target/R600/R600ExpandSpecialInstrs.cpp)0
-rw-r--r--lib/Target/AMDGPU/R600ISelLowering.cpp (renamed from lib/Target/R600/R600ISelLowering.cpp)0
-rw-r--r--lib/Target/AMDGPU/R600ISelLowering.h (renamed from lib/Target/R600/R600ISelLowering.h)2
-rw-r--r--lib/Target/AMDGPU/R600InstrFormats.td (renamed from lib/Target/R600/R600InstrFormats.td)0
-rw-r--r--lib/Target/AMDGPU/R600InstrInfo.cpp (renamed from lib/Target/R600/R600InstrInfo.cpp)13
-rw-r--r--lib/Target/AMDGPU/R600InstrInfo.h (renamed from lib/Target/R600/R600InstrInfo.h)12
-rw-r--r--lib/Target/AMDGPU/R600Instructions.td (renamed from lib/Target/R600/R600Instructions.td)0
-rw-r--r--lib/Target/AMDGPU/R600Intrinsics.td (renamed from lib/Target/R600/R600Intrinsics.td)0
-rw-r--r--lib/Target/AMDGPU/R600MachineFunctionInfo.cpp (renamed from lib/Target/R600/R600MachineFunctionInfo.cpp)0
-rw-r--r--lib/Target/AMDGPU/R600MachineFunctionInfo.h (renamed from lib/Target/R600/R600MachineFunctionInfo.h)2
-rw-r--r--lib/Target/AMDGPU/R600MachineScheduler.cpp (renamed from lib/Target/R600/R600MachineScheduler.cpp)0
-rw-r--r--lib/Target/AMDGPU/R600MachineScheduler.h (renamed from lib/Target/R600/R600MachineScheduler.h)0
-rw-r--r--lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp (renamed from lib/Target/R600/R600OptimizeVectorRegisters.cpp)2
-rw-r--r--lib/Target/AMDGPU/R600Packetizer.cpp (renamed from lib/Target/R600/R600Packetizer.cpp)0
-rw-r--r--lib/Target/AMDGPU/R600RegisterInfo.cpp (renamed from lib/Target/R600/R600RegisterInfo.cpp)0
-rw-r--r--lib/Target/AMDGPU/R600RegisterInfo.h (renamed from lib/Target/R600/R600RegisterInfo.h)0
-rw-r--r--lib/Target/AMDGPU/R600RegisterInfo.td (renamed from lib/Target/R600/R600RegisterInfo.td)0
-rw-r--r--lib/Target/AMDGPU/R600Schedule.td (renamed from lib/Target/R600/R600Schedule.td)0
-rw-r--r--lib/Target/AMDGPU/R600TextureIntrinsicsReplacer.cpp (renamed from lib/Target/R600/R600TextureIntrinsicsReplacer.cpp)2
-rw-r--r--lib/Target/AMDGPU/R700Instructions.td (renamed from lib/Target/R600/R700Instructions.td)0
-rw-r--r--lib/Target/AMDGPU/SIAnnotateControlFlow.cpp (renamed from lib/Target/R600/SIAnnotateControlFlow.cpp)0
-rw-r--r--lib/Target/AMDGPU/SIDefines.h (renamed from lib/Target/R600/SIDefines.h)4
-rw-r--r--lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp (renamed from lib/Target/R600/SIFixControlFlowLiveIntervals.cpp)0
-rw-r--r--lib/Target/AMDGPU/SIFixSGPRCopies.cpp (renamed from lib/Target/R600/SIFixSGPRCopies.cpp)0
-rw-r--r--lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp (renamed from lib/Target/R600/SIFixSGPRLiveRanges.cpp)0
-rw-r--r--lib/Target/AMDGPU/SIFoldOperands.cpp (renamed from lib/Target/R600/SIFoldOperands.cpp)0
-rw-r--r--lib/Target/AMDGPU/SIISelLowering.cpp (renamed from lib/Target/R600/SIISelLowering.cpp)0
-rw-r--r--lib/Target/AMDGPU/SIISelLowering.h (renamed from lib/Target/R600/SIISelLowering.h)0
-rw-r--r--lib/Target/AMDGPU/SIInsertWaits.cpp (renamed from lib/Target/R600/SIInsertWaits.cpp)0
-rw-r--r--lib/Target/AMDGPU/SIInstrFormats.td (renamed from lib/Target/R600/SIInstrFormats.td)2
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.cpp (renamed from lib/Target/R600/SIInstrInfo.cpp)12
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.h (renamed from lib/Target/R600/SIInstrInfo.h)6
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.td (renamed from lib/Target/R600/SIInstrInfo.td)92
-rw-r--r--lib/Target/AMDGPU/SIInstructions.td (renamed from lib/Target/R600/SIInstructions.td)108
-rw-r--r--lib/Target/AMDGPU/SIIntrinsics.td (renamed from lib/Target/R600/SIIntrinsics.td)0
-rw-r--r--lib/Target/AMDGPU/SILoadStoreOptimizer.cpp (renamed from lib/Target/R600/SILoadStoreOptimizer.cpp)0
-rw-r--r--lib/Target/AMDGPU/SILowerControlFlow.cpp (renamed from lib/Target/R600/SILowerControlFlow.cpp)0
-rw-r--r--lib/Target/AMDGPU/SILowerI1Copies.cpp (renamed from lib/Target/R600/SILowerI1Copies.cpp)0
-rw-r--r--lib/Target/AMDGPU/SIMachineFunctionInfo.cpp (renamed from lib/Target/R600/SIMachineFunctionInfo.cpp)0
-rw-r--r--lib/Target/AMDGPU/SIMachineFunctionInfo.h (renamed from lib/Target/R600/SIMachineFunctionInfo.h)0
-rw-r--r--lib/Target/AMDGPU/SIPrepareScratchRegs.cpp (renamed from lib/Target/R600/SIPrepareScratchRegs.cpp)0
-rw-r--r--lib/Target/AMDGPU/SIRegisterInfo.cpp (renamed from lib/Target/R600/SIRegisterInfo.cpp)0
-rw-r--r--lib/Target/AMDGPU/SIRegisterInfo.h (renamed from lib/Target/R600/SIRegisterInfo.h)0
-rw-r--r--lib/Target/AMDGPU/SIRegisterInfo.td (renamed from lib/Target/R600/SIRegisterInfo.td)0
-rw-r--r--lib/Target/AMDGPU/SISchedule.td (renamed from lib/Target/R600/SISchedule.td)0
-rw-r--r--lib/Target/AMDGPU/SIShrinkInstructions.cpp (renamed from lib/Target/R600/SIShrinkInstructions.cpp)0
-rw-r--r--lib/Target/AMDGPU/SITypeRewriter.cpp (renamed from lib/Target/R600/SITypeRewriter.cpp)0
-rw-r--r--lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp (renamed from lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp)2
-rw-r--r--lib/Target/AMDGPU/TargetInfo/CMakeLists.txt3
-rw-r--r--lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt (renamed from lib/Target/R600/TargetInfo/LLVMBuild.txt)8
-rw-r--r--lib/Target/AMDGPU/TargetInfo/Makefile (renamed from lib/Target/R600/TargetInfo/Makefile)2
-rw-r--r--lib/Target/AMDGPU/VIInstrFormats.td (renamed from lib/Target/R600/VIInstrFormats.td)0
-rw-r--r--lib/Target/AMDGPU/VIInstructions.td (renamed from lib/Target/R600/VIInstructions.td)0
-rw-r--r--lib/Target/ARM/ARM.h2
-rw-r--r--lib/Target/ARM/ARMAsmPrinter.cpp6
-rw-r--r--lib/Target/ARM/ARMAsmPrinter.h2
-rw-r--r--lib/Target/ARM/ARMBaseInstrInfo.cpp23
-rw-r--r--lib/Target/ARM/ARMBaseInstrInfo.h15
-rw-r--r--lib/Target/ARM/ARMCallingConv.h2
-rw-r--r--lib/Target/ARM/ARMConstantIslandPass.cpp2
-rw-r--r--lib/Target/ARM/ARMConstantPoolValue.h4
-rw-r--r--lib/Target/ARM/ARMExpandPseudoInsts.cpp4
-rw-r--r--lib/Target/ARM/ARMFastISel.cpp2
-rw-r--r--lib/Target/ARM/ARMFeatures.h2
-rw-r--r--lib/Target/ARM/ARMFrameLowering.cpp2
-rw-r--r--lib/Target/ARM/ARMFrameLowering.h2
-rw-r--r--lib/Target/ARM/ARMISelDAGToDAG.cpp2
-rw-r--r--lib/Target/ARM/ARMISelLowering.cpp110
-rw-r--r--lib/Target/ARM/ARMISelLowering.h4
-rw-r--r--lib/Target/ARM/ARMInstrInfo.cpp2
-rw-r--r--lib/Target/ARM/ARMInstrInfo.h2
-rw-r--r--lib/Target/ARM/ARMLoadStoreOptimizer.cpp4
-rw-r--r--lib/Target/ARM/ARMMachineFunctionInfo.h2
-rw-r--r--lib/Target/ARM/ARMOptimizeBarriersPass.cpp2
-rw-r--r--lib/Target/ARM/ARMSelectionDAGInfo.h2
-rw-r--r--lib/Target/ARM/ARMSubtarget.cpp7
-rw-r--r--lib/Target/ARM/ARMSubtarget.h8
-rw-r--r--lib/Target/ARM/ARMTargetMachine.cpp36
-rw-r--r--lib/Target/ARM/ARMTargetMachine.h34
-rw-r--r--lib/Target/ARM/AsmParser/ARMAsmParser.cpp2
-rw-r--r--lib/Target/ARM/Disassembler/ARMDisassembler.cpp4
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp23
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h5
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h5
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h5
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h4
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h4
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp2
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp4
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h4
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp4
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp24
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h28
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp2
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp2
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp2
-rw-r--r--lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp2
-rw-r--r--lib/Target/ARM/MLxExpansionPass.cpp2
-rw-r--r--lib/Target/ARM/Thumb1FrameLowering.h2
-rw-r--r--lib/Target/ARM/Thumb1InstrInfo.h2
-rw-r--r--lib/Target/ARM/Thumb2ITBlockPass.cpp2
-rw-r--r--lib/Target/ARM/Thumb2InstrInfo.h2
-rw-r--r--lib/Target/ARM/Thumb2SizeReduction.cpp2
-rw-r--r--lib/Target/ARM/ThumbRegisterInfo.h2
-rw-r--r--lib/Target/BPF/BPFAsmPrinter.cpp2
-rw-r--r--lib/Target/BPF/BPFFrameLowering.h2
-rw-r--r--lib/Target/BPF/BPFISelDAGToDAG.cpp2
-rw-r--r--lib/Target/BPF/BPFISelLowering.cpp2
-rw-r--r--lib/Target/BPF/BPFISelLowering.h2
-rw-r--r--lib/Target/BPF/BPFInstrInfo.cpp2
-rw-r--r--lib/Target/BPF/BPFInstrInfo.h5
-rw-r--r--lib/Target/BPF/BPFMCInstLower.h2
-rw-r--r--lib/Target/BPF/BPFRegisterInfo.h2
-rw-r--r--lib/Target/BPF/BPFSubtarget.cpp2
-rw-r--r--lib/Target/BPF/BPFSubtarget.h6
-rw-r--r--lib/Target/BPF/BPFTargetMachine.cpp15
-rw-r--r--lib/Target/BPF/BPFTargetMachine.h6
-rw-r--r--lib/Target/BPF/InstPrinter/BPFInstPrinter.h2
-rw-r--r--lib/Target/BPF/LLVMBuild.txt12
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp10
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp2
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h2
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp2
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp4
-rw-r--r--lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h9
-rw-r--r--lib/Target/CppBackend/CPPBackend.cpp4
-rw-r--r--lib/Target/CppBackend/CPPTargetMachine.h10
-rw-r--r--lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp120
-rw-r--r--lib/Target/Hexagon/Hexagon.h67
-rwxr-xr-xlib/Target/Hexagon/HexagonAsmPrinter.h2
-rw-r--r--lib/Target/Hexagon/HexagonCFGOptimizer.cpp3
-rw-r--r--lib/Target/Hexagon/HexagonCopyToCombine.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonExpandCondsets.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp3
-rw-r--r--lib/Target/Hexagon/HexagonFixupHwLoops.cpp3
-rw-r--r--lib/Target/Hexagon/HexagonFrameLowering.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonFrameLowering.h2
-rw-r--r--lib/Target/Hexagon/HexagonHardwareLoops.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonISelDAGToDAG.cpp15
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.cpp9
-rw-r--r--lib/Target/Hexagon/HexagonISelLowering.h2
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.cpp13
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.h18
-rw-r--r--lib/Target/Hexagon/HexagonIntrinsics.td27
-rw-r--r--lib/Target/Hexagon/HexagonMachineFunctionInfo.h2
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.h2
-rw-r--r--lib/Target/Hexagon/HexagonNewValueJump.cpp1
-rw-r--r--lib/Target/Hexagon/HexagonOperands.td87
-rw-r--r--lib/Target/Hexagon/HexagonPeephole.cpp3
-rw-r--r--lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp3
-rw-r--r--lib/Target/Hexagon/HexagonSelectionDAGInfo.h2
-rw-r--r--lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp7
-rw-r--r--lib/Target/Hexagon/HexagonSubtarget.cpp4
-rw-r--r--lib/Target/Hexagon/HexagonSubtarget.h2
-rw-r--r--lib/Target/Hexagon/HexagonTargetMachine.cpp20
-rw-r--r--lib/Target/Hexagon/HexagonTargetMachine.h2
-rw-r--r--lib/Target/Hexagon/HexagonTargetStreamer.h31
-rw-r--r--lib/Target/Hexagon/HexagonVLIWPacketizer.cpp3
-rw-r--r--lib/Target/Hexagon/LLVMBuild.txt12
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt1
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp29
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonELFObjectWriter.cpp504
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp41
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h6
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp76
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp3
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp152
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.h45
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp17
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h19
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.h2
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp118
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h7
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.h2
-rw-r--r--lib/Target/LLVMBuild.txt2
-rw-r--r--lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h2
-rw-r--r--lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp4
-rw-r--r--lib/Target/MSP430/MSP430.h4
-rw-r--r--lib/Target/MSP430/MSP430BranchSelector.cpp2
-rw-r--r--lib/Target/MSP430/MSP430FrameLowering.h2
-rw-r--r--lib/Target/MSP430/MSP430ISelDAGToDAG.cpp2
-rw-r--r--lib/Target/MSP430/MSP430ISelLowering.h2
-rw-r--r--lib/Target/MSP430/MSP430InstrInfo.cpp2
-rw-r--r--lib/Target/MSP430/MSP430InstrInfo.h7
-rw-r--r--lib/Target/MSP430/MSP430MCInstLower.h2
-rw-r--r--lib/Target/MSP430/MSP430MachineFunctionInfo.h2
-rw-r--r--lib/Target/MSP430/MSP430SelectionDAGInfo.h2
-rw-r--r--lib/Target/MSP430/MSP430Subtarget.cpp2
-rw-r--r--lib/Target/MSP430/MSP430Subtarget.h4
-rw-r--r--lib/Target/MSP430/MSP430TargetMachine.cpp2
-rw-r--r--lib/Target/MSP430/MSP430TargetMachine.h4
-rw-r--r--lib/Target/Mips/AsmParser/MipsAsmParser.cpp380
-rw-r--r--lib/Target/Mips/LLVMBuild.txt13
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsABIInfo.h4
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp27
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h4
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp60
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp10
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h21
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp2
-rw-r--r--lib/Target/Mips/MicroMips32r6InstrFormats.td19
-rw-r--r--lib/Target/Mips/MicroMips32r6InstrInfo.td8
-rw-r--r--lib/Target/Mips/Mips.h2
-rw-r--r--lib/Target/Mips/Mips16FrameLowering.h2
-rw-r--r--lib/Target/Mips/Mips16HardFloat.cpp2
-rw-r--r--lib/Target/Mips/Mips16HardFloatInfo.cpp4
-rw-r--r--lib/Target/Mips/Mips16HardFloatInfo.h4
-rw-r--r--lib/Target/Mips/Mips16ISelDAGToDAG.h2
-rw-r--r--lib/Target/Mips/Mips16ISelLowering.cpp2
-rw-r--r--lib/Target/Mips/Mips16ISelLowering.h2
-rw-r--r--lib/Target/Mips/Mips16InstrInfo.h2
-rw-r--r--lib/Target/Mips/Mips64InstrInfo.td2
-rw-r--r--lib/Target/Mips/MipsAnalyzeImmediate.h2
-rw-r--r--lib/Target/Mips/MipsAsmPrinter.cpp8
-rw-r--r--lib/Target/Mips/MipsAsmPrinter.h2
-rw-r--r--lib/Target/Mips/MipsCCState.h2
-rw-r--r--lib/Target/Mips/MipsFrameLowering.h2
-rw-r--r--lib/Target/Mips/MipsISelDAGToDAG.h2
-rw-r--r--lib/Target/Mips/MipsISelLowering.h4
-rw-r--r--lib/Target/Mips/MipsInstrInfo.cpp5
-rw-r--r--lib/Target/Mips/MipsInstrInfo.h7
-rw-r--r--lib/Target/Mips/MipsInstrInfo.td35
-rw-r--r--lib/Target/Mips/MipsMCInstLower.h2
-rw-r--r--lib/Target/Mips/MipsModuleISelDAGToDAG.cpp2
-rw-r--r--lib/Target/Mips/MipsOs16.cpp2
-rw-r--r--lib/Target/Mips/MipsSEFrameLowering.cpp2
-rw-r--r--lib/Target/Mips/MipsSEFrameLowering.h2
-rw-r--r--lib/Target/Mips/MipsSEISelDAGToDAG.h2
-rw-r--r--lib/Target/Mips/MipsSEISelLowering.h2
-rw-r--r--lib/Target/Mips/MipsSEInstrInfo.h2
-rw-r--r--lib/Target/Mips/MipsSelectionDAGInfo.h2
-rw-r--r--lib/Target/Mips/MipsSubtarget.cpp4
-rw-r--r--lib/Target/Mips/MipsSubtarget.h9
-rw-r--r--lib/Target/Mips/MipsTargetMachine.cpp36
-rw-r--r--lib/Target/Mips/MipsTargetMachine.h14
-rw-r--r--lib/Target/Mips/MipsTargetStreamer.h2
-rw-r--r--lib/Target/NVPTX/CMakeLists.txt1
-rw-r--r--lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h2
-rw-r--r--lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h4
-rw-r--r--lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp2
-rw-r--r--lib/Target/NVPTX/ManagedStringPool.h2
-rw-r--r--lib/Target/NVPTX/NVPTX.h11
-rw-r--r--lib/Target/NVPTX/NVPTXAsmPrinter.cpp7
-rw-r--r--lib/Target/NVPTX/NVPTXAsmPrinter.h2
-rw-r--r--lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp116
-rw-r--r--lib/Target/NVPTX/NVPTXFrameLowering.h2
-rw-r--r--lib/Target/NVPTX/NVPTXISelDAGToDAG.h2
-rw-r--r--lib/Target/NVPTX/NVPTXISelLowering.h2
-rw-r--r--lib/Target/NVPTX/NVPTXImageOptimizer.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXInstrInfo.h2
-rw-r--r--lib/Target/NVPTX/NVPTXLowerAlloca.cpp115
-rw-r--r--lib/Target/NVPTX/NVPTXMachineFunctionInfo.h2
-rw-r--r--lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXRegisterInfo.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXSubtarget.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXSubtarget.h4
-rw-r--r--lib/Target/NVPTX/NVPTXTargetMachine.cpp31
-rw-r--r--lib/Target/NVPTX/NVPTXTargetMachine.h11
-rw-r--r--lib/Target/NVPTX/NVPTXUtilities.h2
-rw-r--r--lib/Target/NVPTX/NVVMReflect.cpp2
-rw-r--r--lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp7
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp8
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp2
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h4
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp2
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp8
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h5
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp2
-rw-r--r--lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h4
-rw-r--r--lib/Target/PowerPC/PPC.h2
-rw-r--r--lib/Target/PowerPC/PPC.td79
-rw-r--r--lib/Target/PowerPC/PPCAsmPrinter.cpp7
-rw-r--r--lib/Target/PowerPC/PPCBranchSelector.cpp2
-rw-r--r--lib/Target/PowerPC/PPCCTRLoops.cpp4
-rw-r--r--lib/Target/PowerPC/PPCCallingConv.h2
-rw-r--r--lib/Target/PowerPC/PPCEarlyReturn.cpp2
-rw-r--r--lib/Target/PowerPC/PPCFastISel.cpp2
-rw-r--r--lib/Target/PowerPC/PPCFrameLowering.h2
-rw-r--r--lib/Target/PowerPC/PPCISelDAGToDAG.cpp44
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.cpp2
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.h6
-rw-r--r--lib/Target/PowerPC/PPCInstrAltivec.td4
-rw-r--r--lib/Target/PowerPC/PPCInstrBuilder.h2
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.cpp17
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.h24
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.td2
-rw-r--r--lib/Target/PowerPC/PPCLoopDataPrefetch.cpp2
-rw-r--r--lib/Target/PowerPC/PPCLoopPreIncPrep.cpp4
-rw-r--r--lib/Target/PowerPC/PPCMCInstLower.cpp2
-rw-r--r--lib/Target/PowerPC/PPCSelectionDAGInfo.h2
-rw-r--r--lib/Target/PowerPC/PPCSubtarget.cpp6
-rw-r--r--lib/Target/PowerPC/PPCSubtarget.h14
-rw-r--r--lib/Target/PowerPC/PPCTLSDynamicCall.cpp2
-rw-r--r--lib/Target/PowerPC/PPCTOCRegDeps.cpp2
-rw-r--r--lib/Target/PowerPC/PPCTargetMachine.cpp37
-rw-r--r--lib/Target/PowerPC/PPCTargetMachine.h14
-rw-r--r--lib/Target/PowerPC/PPCTargetStreamer.h2
-rw-r--r--lib/Target/PowerPC/PPCVSXCopy.cpp2
-rw-r--r--lib/Target/PowerPC/PPCVSXFMAMutate.cpp2
-rw-r--r--lib/Target/PowerPC/PPCVSXSwapRemoval.cpp2
-rw-r--r--lib/Target/R600/AsmParser/CMakeLists.txt3
-rw-r--r--lib/Target/R600/CIInstructions.td42
-rw-r--r--lib/Target/R600/InstPrinter/CMakeLists.txt3
-rw-r--r--lib/Target/R600/TargetInfo/CMakeLists.txt3
-rw-r--r--lib/Target/Sparc/AsmParser/SparcAsmParser.cpp13
-rw-r--r--lib/Target/Sparc/Disassembler/SparcDisassembler.cpp2
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp6
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp27
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h4
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp7
-rw-r--r--lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h5
-rw-r--r--lib/Target/Sparc/Sparc.h4
-rw-r--r--lib/Target/Sparc/SparcFrameLowering.h2
-rw-r--r--lib/Target/Sparc/SparcISelLowering.h2
-rw-r--r--lib/Target/Sparc/SparcInstrInfo.cpp2
-rw-r--r--lib/Target/Sparc/SparcInstrInfo.h5
-rw-r--r--lib/Target/Sparc/SparcMachineFunctionInfo.h2
-rw-r--r--lib/Target/Sparc/SparcSelectionDAGInfo.h2
-rw-r--r--lib/Target/Sparc/SparcSubtarget.cpp2
-rw-r--r--lib/Target/Sparc/SparcSubtarget.h2
-rw-r--r--lib/Target/Sparc/SparcTargetMachine.cpp21
-rw-r--r--lib/Target/Sparc/SparcTargetMachine.h17
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp4
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp5
-rw-r--r--lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h3
-rw-r--r--lib/Target/SystemZ/SystemZISelDAGToDAG.cpp4
-rw-r--r--lib/Target/SystemZ/SystemZISelLowering.cpp4
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.cpp5
-rw-r--r--lib/Target/SystemZ/SystemZInstrInfo.h6
-rw-r--r--lib/Target/SystemZ/SystemZSubtarget.cpp9
-rw-r--r--lib/Target/SystemZ/SystemZSubtarget.h2
-rw-r--r--lib/Target/SystemZ/SystemZTargetMachine.cpp11
-rw-r--r--lib/Target/SystemZ/SystemZTargetMachine.h2
-rw-r--r--lib/Target/TargetLoweringObjectFile.cpp4
-rw-r--r--lib/Target/TargetMachine.cpp3
-rw-r--r--lib/Target/TargetMachineC.cpp2
-rw-r--r--lib/Target/TargetSubtargetInfo.cpp2
-rw-r--r--lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp2
-rw-r--r--lib/Target/X86/AsmParser/X86AsmInstrumentation.h2
-rw-r--r--lib/Target/X86/AsmParser/X86AsmParser.cpp51
-rw-r--r--lib/Target/X86/Disassembler/X86Disassembler.cpp2
-rw-r--r--lib/Target/X86/InstPrinter/X86ATTInstPrinter.h2
-rw-r--r--lib/Target/X86/InstPrinter/X86IntelInstPrinter.h2
-rw-r--r--lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp10
-rw-r--r--lib/Target/X86/MCTargetDesc/X86BaseInfo.h8
-rw-r--r--lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp2
-rw-r--r--lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp3
-rw-r--r--lib/Target/X86/MCTargetDesc/X86FixupKinds.h4
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp18
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h22
-rw-r--r--lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp4
-rw-r--r--lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp2
-rw-r--r--lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp2
-rw-r--r--lib/Target/X86/Utils/X86ShuffleDecode.cpp2
-rw-r--r--lib/Target/X86/Utils/X86ShuffleDecode.h2
-rw-r--r--lib/Target/X86/X86.h2
-rw-r--r--lib/Target/X86/X86AsmPrinter.cpp11
-rw-r--r--lib/Target/X86/X86AsmPrinter.h6
-rw-r--r--lib/Target/X86/X86CallFrameOptimization.cpp2
-rw-r--r--lib/Target/X86/X86CallingConv.h2
-rw-r--r--lib/Target/X86/X86ExpandPseudo.cpp14
-rw-r--r--lib/Target/X86/X86FixupLEAs.cpp4
-rw-r--r--lib/Target/X86/X86FloatingPoint.cpp4
-rw-r--r--lib/Target/X86/X86FrameLowering.cpp495
-rw-r--r--lib/Target/X86/X86FrameLowering.h64
-rw-r--r--lib/Target/X86/X86ISelDAGToDAG.cpp4
-rw-r--r--lib/Target/X86/X86ISelLowering.cpp349
-rw-r--r--lib/Target/X86/X86ISelLowering.h16
-rw-r--r--lib/Target/X86/X86InstrAVX512.td318
-rw-r--r--lib/Target/X86/X86InstrBuilder.h2
-rw-r--r--lib/Target/X86/X86InstrFragmentsSIMD.td10
-rw-r--r--lib/Target/X86/X86InstrInfo.cpp338
-rw-r--r--lib/Target/X86/X86InstrInfo.h62
-rw-r--r--lib/Target/X86/X86InstrSSE.td15
-rw-r--r--lib/Target/X86/X86IntrinsicsInfo.h61
-rw-r--r--lib/Target/X86/X86MCInstLower.cpp102
-rw-r--r--lib/Target/X86/X86MachineFunctionInfo.h2
-rw-r--r--lib/Target/X86/X86PadShortFunction.cpp2
-rw-r--r--lib/Target/X86/X86RegisterInfo.cpp18
-rw-r--r--lib/Target/X86/X86RegisterInfo.h4
-rw-r--r--lib/Target/X86/X86SelectionDAGInfo.h2
-rw-r--r--lib/Target/X86/X86Subtarget.cpp5
-rw-r--r--lib/Target/X86/X86Subtarget.h7
-rw-r--r--lib/Target/X86/X86TargetMachine.cpp21
-rw-r--r--lib/Target/X86/X86TargetMachine.h6
-rw-r--r--lib/Target/X86/X86TargetTransformInfo.cpp20
-rw-r--r--lib/Target/X86/X86VZeroUpper.cpp2
-rw-r--r--lib/Target/X86/X86WinEHState.cpp99
-rw-r--r--lib/Target/XCore/Disassembler/XCoreDisassembler.cpp2
-rw-r--r--lib/Target/XCore/LLVMBuild.txt14
-rw-r--r--lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp6
-rw-r--r--lib/Target/XCore/XCore.h2
-rw-r--r--lib/Target/XCore/XCoreFrameLowering.h2
-rw-r--r--lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp2
-rw-r--r--lib/Target/XCore/XCoreISelLowering.h4
-rw-r--r--lib/Target/XCore/XCoreInstrInfo.cpp4
-rw-r--r--lib/Target/XCore/XCoreInstrInfo.h5
-rw-r--r--lib/Target/XCore/XCoreLowerThreadLocal.cpp2
-rw-r--r--lib/Target/XCore/XCoreMCInstLower.h2
-rw-r--r--lib/Target/XCore/XCoreMachineFunctionInfo.h2
-rw-r--r--lib/Target/XCore/XCoreSelectionDAGInfo.h2
-rw-r--r--lib/Target/XCore/XCoreSubtarget.cpp2
-rw-r--r--lib/Target/XCore/XCoreSubtarget.h6
-rw-r--r--lib/Target/XCore/XCoreTargetMachine.cpp2
-rw-r--r--lib/Target/XCore/XCoreTargetMachine.h4
-rw-r--r--lib/Target/XCore/XCoreTargetStreamer.h2
-rw-r--r--lib/Transforms/Hello/CMakeLists.txt3
-rw-r--r--lib/Transforms/Hello/Hello.cpp4
-rw-r--r--lib/Transforms/IPO/ArgumentPromotion.cpp22
-rw-r--r--lib/Transforms/IPO/BarrierNoopPass.cpp2
-rw-r--r--lib/Transforms/IPO/ConstantMerge.cpp2
-rw-r--r--lib/Transforms/IPO/DeadArgumentElimination.cpp4
-rw-r--r--lib/Transforms/IPO/ExtractGV.cpp2
-rw-r--r--lib/Transforms/IPO/FunctionAttrs.cpp15
-rw-r--r--lib/Transforms/IPO/GlobalDCE.cpp5
-rw-r--r--lib/Transforms/IPO/GlobalOpt.cpp4
-rw-r--r--lib/Transforms/IPO/IPConstantPropagation.cpp2
-rw-r--r--lib/Transforms/IPO/InlineAlways.cpp2
-rw-r--r--lib/Transforms/IPO/Inliner.cpp31
-rw-r--r--lib/Transforms/IPO/LoopExtractor.cpp4
-rw-r--r--lib/Transforms/IPO/MergeFunctions.cpp30
-rw-r--r--lib/Transforms/IPO/PartialInlining.cpp2
-rw-r--r--lib/Transforms/IPO/PassManagerBuilder.cpp4
-rw-r--r--lib/Transforms/IPO/PruneEH.cpp4
-rw-r--r--lib/Transforms/IPO/StripSymbols.cpp2
-rw-r--r--lib/Transforms/InstCombine/InstCombineAddSub.cpp2
-rw-r--r--lib/Transforms/InstCombine/InstCombineCalls.cpp18
-rw-r--r--lib/Transforms/InstCombine/InstCombineMulDivRem.cpp2
-rw-r--r--lib/Transforms/InstCombine/InstCombinePHI.cpp4
-rw-r--r--lib/Transforms/InstCombine/InstructionCombining.cpp12
-rw-r--r--lib/Transforms/Instrumentation/AddressSanitizer.cpp123
-rw-r--r--lib/Transforms/Instrumentation/BoundsChecking.cpp2
-rw-r--r--lib/Transforms/Instrumentation/CMakeLists.txt1
-rw-r--r--lib/Transforms/Instrumentation/DataFlowSanitizer.cpp2
-rw-r--r--lib/Transforms/Instrumentation/GCOVProfiling.cpp4
-rw-r--r--lib/Transforms/Instrumentation/Instrumentation.cpp1
-rw-r--r--lib/Transforms/Instrumentation/MemorySanitizer.cpp2
-rw-r--r--lib/Transforms/Instrumentation/SafeStack.cpp608
-rw-r--r--lib/Transforms/Instrumentation/SanitizerCoverage.cpp12
-rw-r--r--lib/Transforms/ObjCARC/BlotMapVector.h2
-rw-r--r--lib/Transforms/ObjCARC/ObjCARCAPElim.cpp2
-rw-r--r--lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp23
-rw-r--r--lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h8
-rw-r--r--lib/Transforms/ObjCARC/ObjCARCContract.cpp6
-rw-r--r--lib/Transforms/ObjCARC/ObjCARCExpand.cpp2
-rw-r--r--lib/Transforms/ObjCARC/ObjCARCOpts.cpp4
-rw-r--r--lib/Transforms/Scalar/ADCE.cpp2
-rw-r--r--lib/Transforms/Scalar/AlignmentFromAssumptions.cpp2
-rw-r--r--lib/Transforms/Scalar/BDCE.cpp2
-rw-r--r--lib/Transforms/Scalar/ConstantHoisting.cpp2
-rw-r--r--lib/Transforms/Scalar/ConstantProp.cpp2
-rw-r--r--lib/Transforms/Scalar/CorrelatedValuePropagation.cpp2
-rw-r--r--lib/Transforms/Scalar/DCE.cpp4
-rw-r--r--lib/Transforms/Scalar/DeadStoreElimination.cpp60
-rw-r--r--lib/Transforms/Scalar/EarlyCSE.cpp12
-rw-r--r--lib/Transforms/Scalar/FlattenCFGPass.cpp2
-rw-r--r--lib/Transforms/Scalar/Float2Int.cpp2
-rw-r--r--lib/Transforms/Scalar/GVN.cpp55
-rw-r--r--lib/Transforms/Scalar/IndVarSimplify.cpp15
-rw-r--r--lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp4
-rw-r--r--lib/Transforms/Scalar/JumpThreading.cpp70
-rw-r--r--lib/Transforms/Scalar/LICM.cpp4
-rw-r--r--lib/Transforms/Scalar/LoadCombine.cpp2
-rw-r--r--lib/Transforms/Scalar/LoopDeletion.cpp2
-rw-r--r--lib/Transforms/Scalar/LoopDistribute.cpp40
-rw-r--r--lib/Transforms/Scalar/LoopIdiomRecognize.cpp6
-rw-r--r--lib/Transforms/Scalar/LoopInstSimplify.cpp2
-rw-r--r--lib/Transforms/Scalar/LoopInterchange.cpp8
-rw-r--r--lib/Transforms/Scalar/LoopRerollPass.cpp2
-rw-r--r--lib/Transforms/Scalar/LoopRotation.cpp2
-rw-r--r--lib/Transforms/Scalar/LoopStrengthReduce.cpp18
-rw-r--r--lib/Transforms/Scalar/LoopUnrollPass.cpp18
-rw-r--r--lib/Transforms/Scalar/LoopUnswitch.cpp2
-rw-r--r--lib/Transforms/Scalar/LowerAtomic.cpp2
-rw-r--r--lib/Transforms/Scalar/LowerExpectIntrinsic.cpp2
-rw-r--r--lib/Transforms/Scalar/MemCpyOptimizer.cpp19
-rw-r--r--lib/Transforms/Scalar/MergedLoadStoreMotion.cpp24
-rw-r--r--lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp2
-rw-r--r--lib/Transforms/Scalar/PlaceSafepoints.cpp4
-rw-r--r--lib/Transforms/Scalar/Reassociate.cpp4
-rw-r--r--lib/Transforms/Scalar/Reg2Mem.cpp2
-rw-r--r--lib/Transforms/Scalar/RewriteStatepointsForGC.cpp19
-rw-r--r--lib/Transforms/Scalar/SROA.cpp12
-rw-r--r--lib/Transforms/Scalar/SampleProfile.cpp4
-rw-r--r--lib/Transforms/Scalar/ScalarReplAggregates.cpp4
-rw-r--r--lib/Transforms/Scalar/SimplifyCFGPass.cpp2
-rw-r--r--lib/Transforms/Scalar/Sink.cpp2
-rw-r--r--lib/Transforms/Scalar/StraightLineStrengthReduce.cpp13
-rw-r--r--lib/Transforms/Scalar/TailRecursionElimination.cpp7
-rw-r--r--lib/Transforms/Utils/ASanStackFrameLayout.cpp2
-rw-r--r--lib/Transforms/Utils/BasicBlockUtils.cpp7
-rw-r--r--lib/Transforms/Utils/BreakCriticalEdges.cpp2
-rw-r--r--lib/Transforms/Utils/BypassSlowDivision.cpp4
-rw-r--r--lib/Transforms/Utils/CloneFunction.cpp2
-rw-r--r--lib/Transforms/Utils/CtorUtils.cpp2
-rw-r--r--lib/Transforms/Utils/FlattenCFG.cpp2
-rw-r--r--lib/Transforms/Utils/InlineFunction.cpp40
-rw-r--r--lib/Transforms/Utils/InstructionNamer.cpp2
-rw-r--r--lib/Transforms/Utils/LCSSA.cpp2
-rw-r--r--lib/Transforms/Utils/Local.cpp92
-rw-r--r--lib/Transforms/Utils/LoopSimplify.cpp4
-rw-r--r--lib/Transforms/Utils/LoopUnrollRuntime.cpp19
-rw-r--r--lib/Transforms/Utils/LoopUtils.cpp129
-rw-r--r--lib/Transforms/Utils/LowerSwitch.cpp29
-rw-r--r--lib/Transforms/Utils/MetaRenamer.cpp2
-rw-r--r--lib/Transforms/Utils/SSAUpdater.cpp2
-rw-r--r--lib/Transforms/Utils/SimplifyCFG.cpp8
-rw-r--r--lib/Transforms/Utils/SimplifyIndVar.cpp2
-rw-r--r--lib/Transforms/Utils/SimplifyInstructions.cpp2
-rw-r--r--lib/Transforms/Utils/SymbolRewriter.cpp2
-rw-r--r--lib/Transforms/Vectorize/BBVectorize.cpp2
-rw-r--r--lib/Transforms/Vectorize/LoopVectorize.cpp34
-rw-r--r--lib/Transforms/Vectorize/SLPVectorizer.cpp16
931 files changed, 10254 insertions, 6272 deletions
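The heaviest churn in the hunks below is a single API migration inside LLVM's alias analysis: the nested AliasAnalysis::Location type is replaced by the standalone MemoryLocation class, and the old getArgLocation call, which returned the argument's mod/ref mask through a reference parameter, is split into two independent queries. The following is a minimal compilable sketch of the new call-site shape; every type here is a stub standing in for the real LLVM declarations (see llvm/Analysis/AliasAnalysis.h and llvm/Analysis/MemoryLocation.h), not the actual API.

#include <cstdio>

// Stubs standing in for the LLVM types touched by this change; these are
// illustrative only, not the real LLVM API surface.
struct ImmutableCallSite {};
struct TargetLibraryInfo {};
enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = Ref | Mod };

struct MemoryLocation {
  const void *Ptr = nullptr;
  // Mirrors the MemoryLocation::getForArgument factory used in the hunks.
  static MemoryLocation getForArgument(ImmutableCallSite, unsigned,
                                       const TargetLibraryInfo &) {
    return MemoryLocation();
  }
};

struct AliasAnalysis {
  // Mirrors the new getArgModRefInfo query: the mod/ref behaviour of one
  // call argument, with no out-parameter (stubbed to be conservative).
  ModRefResult getArgModRefInfo(ImmutableCallSite, unsigned) {
    return ModRef;
  }
};

// Old call-site shape (the lines removed in the hunks below):
//   ModRefResult ArgMask;
//   Location CSLoc = AA.getArgLocation(CS, ArgIdx, ArgMask);
//
// New call-site shape: location and mod/ref mask are queried separately.
void querySite(AliasAnalysis &AA, ImmutableCallSite CS, unsigned ArgIdx,
               const TargetLibraryInfo &TLI) {
  MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI);
  ModRefResult ArgMask = AA.getArgModRefInfo(CS, ArgIdx);
  (void)ArgLoc;
  std::printf("argument %u mask: %d\n", ArgIdx, (int)ArgMask);
}

int main() {
  AliasAnalysis AA;
  TargetLibraryInfo TLI;
  querySite(AA, ImmutableCallSite(), 0, TLI);
  return 0;
}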
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index ce46d5300517..d44653e8c9c1 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -48,23 +48,22 @@ char AliasAnalysis::ID = 0;
// Default chaining methods
//===----------------------------------------------------------------------===//
-AliasAnalysis::AliasResult
-AliasAnalysis::alias(const Location &LocA, const Location &LocB) {
+AliasAnalysis::AliasResult AliasAnalysis::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->alias(LocA, LocB);
}
-bool AliasAnalysis::pointsToConstantMemory(const Location &Loc,
+bool AliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->pointsToConstantMemory(Loc, OrLocal);
}
-AliasAnalysis::Location
-AliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- AliasAnalysis::ModRefResult &Mask) {
+AliasAnalysis::ModRefResult
+AliasAnalysis::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->getArgLocation(CS, ArgIdx, Mask);
+ return AA->getArgModRefInfo(CS, ArgIdx);
}
void AliasAnalysis::deleteValue(Value *V) {
@@ -93,7 +92,7 @@ AliasAnalysis::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
// location this memory access defines. The best we can say
// is that if the call references what this instruction
// defines, it must be clobbered by this location.
- const AliasAnalysis::Location DefLoc = MemoryLocation::get(I);
+ const MemoryLocation DefLoc = MemoryLocation::get(I);
if (getModRefInfo(Call, DefLoc) != AliasAnalysis::NoModRef)
return AliasAnalysis::ModRef;
}
@@ -101,8 +100,7 @@ AliasAnalysis::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+AliasAnalysis::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
ModRefBehavior MRB = getModRefBehavior(CS);
@@ -122,11 +120,11 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
const Value *Arg = *AI;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CSLoc =
- getArgLocation(CS, (unsigned) std::distance(CS.arg_begin(), AI),
- ArgMask);
- if (!isNoAlias(CSLoc, Loc)) {
+ unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
+ MemoryLocation ArgLoc =
+ MemoryLocation::getForArgument(CS, ArgIdx, *TLI);
+ if (!isNoAlias(ArgLoc, Loc)) {
+ ModRefResult ArgMask = getArgModRefInfo(CS, ArgIdx);
doesAlias = true;
AllArgsMask = ModRefResult(AllArgsMask | ArgMask);
}
@@ -183,18 +181,18 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CS2Loc =
- getArgLocation(CS2, (unsigned) std::distance(CS2.arg_begin(), I),
- ArgMask);
- // ArgMask indicates what CS2 might do to CS2Loc, and the dependence of
+ unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
+ auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, *TLI);
+
+ // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence of
// CS1 on that location is the inverse.
+ ModRefResult ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
if (ArgMask == Mod)
ArgMask = ModRef;
else if (ArgMask == Ref)
ArgMask = Mod;
- R = ModRefResult((R | (getModRefInfo(CS1, CS2Loc) & ArgMask)) & Mask);
+ R = ModRefResult((R | (getModRefInfo(CS1, CS2ArgLoc) & ArgMask)) & Mask);
if (R == Mask)
break;
}
@@ -212,13 +210,14 @@ AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CS1Loc = getArgLocation(
- CS1, (unsigned)std::distance(CS1.arg_begin(), I), ArgMask);
- // ArgMask indicates what CS1 might do to CS1Loc; if CS1 might Mod
- // CS1Loc, then we care about either a Mod or a Ref by CS2. If CS1
+ unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
+ auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, *TLI);
+
+ // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
+ // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
// might Ref, then we care only about a Mod by CS2.
- ModRefResult ArgR = getModRefInfo(CS2, CS1Loc);
+ ModRefResult ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
+ ModRefResult ArgR = getModRefInfo(CS2, CS1ArgLoc);
if (((ArgMask & Mod) != NoModRef && (ArgR & ModRef) != NoModRef) ||
((ArgMask & Ref) != NoModRef && (ArgR & Mod) != NoModRef))
R = ModRefResult((R | ArgMask) & Mask);
@@ -268,7 +267,7 @@ AliasAnalysis::getModRefBehavior(const Function *F) {
//===----------------------------------------------------------------------===//
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const LoadInst *L, const Location &Loc) {
+AliasAnalysis::getModRefInfo(const LoadInst *L, const MemoryLocation &Loc) {
// Be conservative in the face of volatile/atomic.
if (!L->isUnordered())
return ModRef;
@@ -283,7 +282,7 @@ AliasAnalysis::getModRefInfo(const LoadInst *L, const Location &Loc) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const StoreInst *S, const Location &Loc) {
+AliasAnalysis::getModRefInfo(const StoreInst *S, const MemoryLocation &Loc) {
// Be conservative in the face of volatile/atomic.
if (!S->isUnordered())
return ModRef;
@@ -306,7 +305,7 @@ AliasAnalysis::getModRefInfo(const StoreInst *S, const Location &Loc) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const VAArgInst *V, const Location &Loc) {
+AliasAnalysis::getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc) {
if (Loc.Ptr) {
// If the va_arg address cannot alias the pointer in question, then the
@@ -325,7 +324,8 @@ AliasAnalysis::getModRefInfo(const VAArgInst *V, const Location &Loc) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX, const Location &Loc) {
+AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX,
+ const MemoryLocation &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
if (CX->getSuccessOrdering() > Monotonic)
return ModRef;
@@ -338,7 +338,8 @@ AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX, const Location &Loc) {
}
AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW, const Location &Loc) {
+AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW,
+ const MemoryLocation &Loc) {
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
if (RMW->getOrdering() > Monotonic)
return ModRef;
@@ -354,10 +355,8 @@ AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW, const Location &Loc) {
// BasicAA isn't willing to spend linear time determining whether an alloca
// was captured before or after this particular call, while we are. However,
// with a smarter AA in place, this test is just wasting compile time.
-AliasAnalysis::ModRefResult
-AliasAnalysis::callCapturesBefore(const Instruction *I,
- const AliasAnalysis::Location &MemLoc,
- DominatorTree *DT) {
+AliasAnalysis::ModRefResult AliasAnalysis::callCapturesBefore(
+ const Instruction *I, const MemoryLocation &MemLoc, DominatorTree *DT) {
if (!DT)
return AliasAnalysis::ModRef;
@@ -390,8 +389,7 @@ AliasAnalysis::callCapturesBefore(const Instruction *I,
// is impossible to alias the pointer we're checking. If not, we have to
// assume that the call could touch the pointer, even though it doesn't
// escape.
- if (isNoAlias(AliasAnalysis::Location(*CI),
- AliasAnalysis::Location(Object)))
+ if (isNoAlias(MemoryLocation(*CI), MemoryLocation(Object)))
continue;
if (CS.doesNotAccessMemory(ArgNo))
continue;
@@ -431,14 +429,14 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// if known, or a conservative value otherwise.
///
uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
- return DL ? DL->getTypeStoreSize(Ty) : UnknownSize;
+ return DL ? DL->getTypeStoreSize(Ty) : MemoryLocation::UnknownSize;
}
/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
return canInstructionRangeModRef(BB.front(), BB.back(), Loc, Mod);
}
@@ -449,7 +447,7 @@ bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB,
/// I1 and I2 must be in the same basic block.
bool AliasAnalysis::canInstructionRangeModRef(const Instruction &I1,
const Instruction &I2,
- const Location &Loc,
+ const MemoryLocation &Loc,
const ModRefResult Mode) {
assert(I1.getParent() == I2.getParent() &&
"Instructions not in same basic block!");
diff --git a/lib/Analysis/AliasAnalysisCounter.cpp b/lib/Analysis/AliasAnalysisCounter.cpp
index a1bfba1f0026..0112186720bd 100644
--- a/lib/Analysis/AliasAnalysisCounter.cpp
+++ b/lib/Analysis/AliasAnalysisCounter.cpp
@@ -98,22 +98,24 @@ namespace {
}
// FIXME: We could count these too...
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override {
return getAnalysis<AliasAnalysis>().pointsToConstantMemory(Loc, OrLocal);
}
// Forwarding functions: just delegate to a real AA implementation, counting
// the number of responses...
- AliasResult alias(const Location &LocA, const Location &LocB) override;
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override;
+ const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
};
-}
+} // namespace
char AliasAnalysisCounter::ID = 0;
INITIALIZE_AG_PASS(AliasAnalysisCounter, AliasAnalysis, "count-aa",
@@ -124,7 +126,8 @@ ModulePass *llvm::createAliasAnalysisCounterPass() {
}
AliasAnalysis::AliasResult
-AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
+AliasAnalysisCounter::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
AliasResult R = getAnalysis<AliasAnalysis>().alias(LocA, LocB);
const char *AliasString = nullptr;
@@ -150,7 +153,7 @@ AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
AliasAnalysis::ModRefResult
AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, Loc);
const char *MRString = nullptr;
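The counter pass remains a plain decorator: every query is forwarded to the real analysis and the response is tallied on the way back out. A reduced model of that shape (the types here are placeholders, not the pass framework):

#include <cstdio>
#include <functional>

enum AliasResult { NoAlias = 0, MayAlias = 1, PartialAlias = 2, MustAlias = 3 };

struct AliasCounter {
  std::function<AliasResult(const void *, const void *)> Inner; // real AA
  unsigned Counts[4] = {0, 0, 0, 0};

  AliasResult alias(const void *A, const void *B) {
    AliasResult R = Inner(A, B); // delegate to the wrapped analysis
    ++Counts[R];                 // ...then count its response
    return R;
  }

  void report() const {
    std::printf("%u no / %u may / %u partial / %u must\n", Counts[NoAlias],
                Counts[MayAlias], Counts[PartialAlias], Counts[MustAlias]);
  }
};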
diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp
index dd6a3a0715e1..1501b5f64aa6 100644
--- a/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -47,8 +47,8 @@ static cl::opt<bool> EvalAAMD("evaluate-aa-metadata", cl::ReallyHidden);
namespace {
class AAEval : public FunctionPass {
- unsigned NoAlias, MayAlias, PartialAlias, MustAlias;
- unsigned NoModRef, Mod, Ref, ModRef;
+ unsigned NoAliasCount, MayAliasCount, PartialAliasCount, MustAliasCount;
+ unsigned NoModRefCount, ModCount, RefCount, ModRefCount;
public:
static char ID; // Pass identification, replacement for typeid
@@ -62,8 +62,8 @@ namespace {
}
bool doInitialization(Module &M) override {
- NoAlias = MayAlias = PartialAlias = MustAlias = 0;
- NoModRef = Mod = Ref = ModRef = 0;
+ NoAliasCount = MayAliasCount = PartialAliasCount = MustAliasCount = 0;
+ NoModRefCount = ModCount = RefCount = ModRefCount = 0;
if (PrintAll) {
PrintNoAlias = PrintMayAlias = true;
@@ -76,7 +76,7 @@ namespace {
bool runOnFunction(Function &F) override;
bool doFinalization(Module &M) override;
};
-}
+} // namespace
char AAEval::ID = 0;
INITIALIZE_PASS_BEGIN(AAEval, "aa-eval",
@@ -186,29 +186,33 @@ bool AAEval::runOnFunction(Function &F) {
// iterate over the worklist, and run the full (n^2)/2 disambiguations
for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
I1 != E; ++I1) {
- uint64_t I1Size = AliasAnalysis::UnknownSize;
+ uint64_t I1Size = MemoryLocation::UnknownSize;
Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
if (I1ElTy->isSized()) I1Size = AA.getTypeStoreSize(I1ElTy);
for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
- uint64_t I2Size = AliasAnalysis::UnknownSize;
+ uint64_t I2Size = MemoryLocation::UnknownSize;
Type *I2ElTy =cast<PointerType>((*I2)->getType())->getElementType();
if (I2ElTy->isSized()) I2Size = AA.getTypeStoreSize(I2ElTy);
switch (AA.alias(*I1, I1Size, *I2, I2Size)) {
case AliasAnalysis::NoAlias:
PrintResults("NoAlias", PrintNoAlias, *I1, *I2, F.getParent());
- ++NoAlias; break;
+ ++NoAliasCount;
+ break;
case AliasAnalysis::MayAlias:
PrintResults("MayAlias", PrintMayAlias, *I1, *I2, F.getParent());
- ++MayAlias; break;
+ ++MayAliasCount;
+ break;
case AliasAnalysis::PartialAlias:
PrintResults("PartialAlias", PrintPartialAlias, *I1, *I2,
F.getParent());
- ++PartialAlias; break;
+ ++PartialAliasCount;
+ break;
case AliasAnalysis::MustAlias:
PrintResults("MustAlias", PrintMustAlias, *I1, *I2, F.getParent());
- ++MustAlias; break;
+ ++MustAliasCount;
+ break;
}
}
}
@@ -224,19 +228,23 @@ bool AAEval::runOnFunction(Function &F) {
case AliasAnalysis::NoAlias:
PrintLoadStoreResults("NoAlias", PrintNoAlias, *I1, *I2,
F.getParent());
- ++NoAlias; break;
+ ++NoAliasCount;
+ break;
case AliasAnalysis::MayAlias:
PrintLoadStoreResults("MayAlias", PrintMayAlias, *I1, *I2,
F.getParent());
- ++MayAlias; break;
+ ++MayAliasCount;
+ break;
case AliasAnalysis::PartialAlias:
PrintLoadStoreResults("PartialAlias", PrintPartialAlias, *I1, *I2,
F.getParent());
- ++PartialAlias; break;
+ ++PartialAliasCount;
+ break;
case AliasAnalysis::MustAlias:
PrintLoadStoreResults("MustAlias", PrintMustAlias, *I1, *I2,
F.getParent());
- ++MustAlias; break;
+ ++MustAliasCount;
+ break;
}
}
}
@@ -250,19 +258,23 @@ bool AAEval::runOnFunction(Function &F) {
case AliasAnalysis::NoAlias:
PrintLoadStoreResults("NoAlias", PrintNoAlias, *I1, *I2,
F.getParent());
- ++NoAlias; break;
+ ++NoAliasCount;
+ break;
case AliasAnalysis::MayAlias:
PrintLoadStoreResults("MayAlias", PrintMayAlias, *I1, *I2,
F.getParent());
- ++MayAlias; break;
+ ++MayAliasCount;
+ break;
case AliasAnalysis::PartialAlias:
PrintLoadStoreResults("PartialAlias", PrintPartialAlias, *I1, *I2,
F.getParent());
- ++PartialAlias; break;
+ ++PartialAliasCount;
+ break;
case AliasAnalysis::MustAlias:
PrintLoadStoreResults("MustAlias", PrintMustAlias, *I1, *I2,
F.getParent());
- ++MustAlias; break;
+ ++MustAliasCount;
+ break;
}
}
}
@@ -275,23 +287,27 @@ bool AAEval::runOnFunction(Function &F) {
for (SetVector<Value *>::iterator V = Pointers.begin(), Ve = Pointers.end();
V != Ve; ++V) {
- uint64_t Size = AliasAnalysis::UnknownSize;
+ uint64_t Size = MemoryLocation::UnknownSize;
Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
if (ElTy->isSized()) Size = AA.getTypeStoreSize(ElTy);
switch (AA.getModRefInfo(*C, *V, Size)) {
case AliasAnalysis::NoModRef:
PrintModRefResults("NoModRef", PrintNoModRef, I, *V, F.getParent());
- ++NoModRef; break;
+ ++NoModRefCount;
+ break;
case AliasAnalysis::Mod:
PrintModRefResults("Just Mod", PrintMod, I, *V, F.getParent());
- ++Mod; break;
+ ++ModCount;
+ break;
case AliasAnalysis::Ref:
PrintModRefResults("Just Ref", PrintRef, I, *V, F.getParent());
- ++Ref; break;
+ ++RefCount;
+ break;
case AliasAnalysis::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, I, *V, F.getParent());
- ++ModRef; break;
+ ++ModRefCount;
+ break;
}
}
}
@@ -305,16 +321,20 @@ bool AAEval::runOnFunction(Function &F) {
switch (AA.getModRefInfo(*C, *D)) {
case AliasAnalysis::NoModRef:
PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent());
- ++NoModRef; break;
+ ++NoModRefCount;
+ break;
case AliasAnalysis::Mod:
PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent());
- ++Mod; break;
+ ++ModCount;
+ break;
case AliasAnalysis::Ref:
PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent());
- ++Ref; break;
+ ++RefCount;
+ break;
case AliasAnalysis::ModRef:
PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent());
- ++ModRef; break;
+ ++ModRefCount;
+ break;
}
}
}
@@ -328,43 +348,47 @@ static void PrintPercent(unsigned Num, unsigned Sum) {
}
bool AAEval::doFinalization(Module &M) {
- unsigned AliasSum = NoAlias + MayAlias + PartialAlias + MustAlias;
+ unsigned AliasSum =
+ NoAliasCount + MayAliasCount + PartialAliasCount + MustAliasCount;
errs() << "===== Alias Analysis Evaluator Report =====\n";
if (AliasSum == 0) {
errs() << " Alias Analysis Evaluator Summary: No pointers!\n";
} else {
errs() << " " << AliasSum << " Total Alias Queries Performed\n";
- errs() << " " << NoAlias << " no alias responses ";
- PrintPercent(NoAlias, AliasSum);
- errs() << " " << MayAlias << " may alias responses ";
- PrintPercent(MayAlias, AliasSum);
- errs() << " " << PartialAlias << " partial alias responses ";
- PrintPercent(PartialAlias, AliasSum);
- errs() << " " << MustAlias << " must alias responses ";
- PrintPercent(MustAlias, AliasSum);
+ errs() << " " << NoAliasCount << " no alias responses ";
+ PrintPercent(NoAliasCount, AliasSum);
+ errs() << " " << MayAliasCount << " may alias responses ";
+ PrintPercent(MayAliasCount, AliasSum);
+ errs() << " " << PartialAliasCount << " partial alias responses ";
+ PrintPercent(PartialAliasCount, AliasSum);
+ errs() << " " << MustAliasCount << " must alias responses ";
+ PrintPercent(MustAliasCount, AliasSum);
errs() << " Alias Analysis Evaluator Pointer Alias Summary: "
- << NoAlias*100/AliasSum << "%/" << MayAlias*100/AliasSum << "%/"
- << PartialAlias*100/AliasSum << "%/"
- << MustAlias*100/AliasSum << "%\n";
+ << NoAliasCount * 100 / AliasSum << "%/"
+ << MayAliasCount * 100 / AliasSum << "%/"
+ << PartialAliasCount * 100 / AliasSum << "%/"
+ << MustAliasCount * 100 / AliasSum << "%\n";
}
// Display the summary for mod/ref analysis
- unsigned ModRefSum = NoModRef + Mod + Ref + ModRef;
+ unsigned ModRefSum = NoModRefCount + ModCount + RefCount + ModRefCount;
if (ModRefSum == 0) {
- errs() << " Alias Analysis Mod/Ref Evaluator Summary: no mod/ref!\n";
+ errs() << " Alias Analysis Mod/Ref Evaluator Summary: no "
+ "mod/ref!\n";
} else {
errs() << " " << ModRefSum << " Total ModRef Queries Performed\n";
- errs() << " " << NoModRef << " no mod/ref responses ";
- PrintPercent(NoModRef, ModRefSum);
- errs() << " " << Mod << " mod responses ";
- PrintPercent(Mod, ModRefSum);
- errs() << " " << Ref << " ref responses ";
- PrintPercent(Ref, ModRefSum);
- errs() << " " << ModRef << " mod & ref responses ";
- PrintPercent(ModRef, ModRefSum);
+ errs() << " " << NoModRefCount << " no mod/ref responses ";
+ PrintPercent(NoModRefCount, ModRefSum);
+ errs() << " " << ModCount << " mod responses ";
+ PrintPercent(ModCount, ModRefSum);
+ errs() << " " << RefCount << " ref responses ";
+ PrintPercent(RefCount, ModRefSum);
+ errs() << " " << ModRefCount << " mod & ref responses ";
+ PrintPercent(ModRefCount, ModRefSum);
errs() << " Alias Analysis Evaluator Mod/Ref Summary: "
- << NoModRef*100/ModRefSum << "%/" << Mod*100/ModRefSum << "%/"
- << Ref*100/ModRefSum << "%/" << ModRef*100/ModRefSum << "%\n";
+ << NoModRefCount * 100 / ModRefSum << "%/"
+ << ModCount * 100 / ModRefSum << "%/" << RefCount * 100 / ModRefSum
+ << "%/" << ModRefCount * 100 / ModRefSum << "%\n";
}
return false;
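The renamed counters feed the same integer-percentage report as before. A compilable model of the summary arithmetic follows; printPercent here is a stand-in for the pass's helper, and the widening to 64 bits is my own precaution against Count * 100 overflowing a 32-bit counter, not something this commit changes:

#include <cstdint>
#include <cstdio>

static void printPercent(uint64_t Num, uint64_t Sum) {
  std::printf(" (%llu.%llu%%)\n", (unsigned long long)(Num * 100 / Sum),
              (unsigned long long)(Num * 1000 / Sum % 10));
}

int main() {
  uint64_t NoAliasCount = 750, MayAliasCount = 230, PartialAliasCount = 15,
           MustAliasCount = 5;
  uint64_t AliasSum =
      NoAliasCount + MayAliasCount + PartialAliasCount + MustAliasCount;
  std::printf("  %llu no alias responses", (unsigned long long)NoAliasCount);
  printPercent(NoAliasCount, AliasSum); // (75.0%)
  std::printf("  %llu may alias responses", (unsigned long long)MayAliasCount);
  printPercent(MayAliasCount, AliasSum); // (23.0%)
  return 0;
}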
diff --git a/lib/Analysis/AliasDebugger.cpp b/lib/Analysis/AliasDebugger.cpp
index f98b57819609..fde0eeb43d48 100644
--- a/lib/Analysis/AliasDebugger.cpp
+++ b/lib/Analysis/AliasDebugger.cpp
@@ -94,7 +94,8 @@ namespace {
//------------------------------------------------
// Implement the AliasAnalysis API
//
- AliasResult alias(const Location &LocA, const Location &LocB) override {
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override {
assert(Vals.find(LocA.Ptr) != Vals.end() &&
"Never seen value in AA before");
assert(Vals.find(LocB.Ptr) != Vals.end() &&
@@ -103,7 +104,7 @@ namespace {
}
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override {
+ const MemoryLocation &Loc) override {
assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before");
return AliasAnalysis::getModRefInfo(CS, Loc);
}
@@ -113,7 +114,8 @@ namespace {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override {
assert(Vals.find(Loc.Ptr) != Vals.end() && "Never seen value in AA before");
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
}
@@ -128,7 +130,7 @@ namespace {
}
};
-}
+} // namespace
char AliasDebugger::ID = 0;
INITIALIZE_AG_PASS(AliasDebugger, AliasAnalysis, "debug-aa",
diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp
index 12c1c7d4af90..f7a803c5f4ce 100644
--- a/lib/Analysis/AliasSetTracker.cpp
+++ b/lib/Analysis/AliasSetTracker.cpp
@@ -45,13 +45,9 @@ void AliasSet::mergeSetIn(AliasSet &AS, AliasSetTracker &AST) {
PointerRec *R = AS.getSomePointer();
// If the pointers are not a must-alias pair, this set becomes a may alias.
- if (AA.alias(AliasAnalysis::Location(L->getValue(),
- L->getSize(),
- L->getAAInfo()),
- AliasAnalysis::Location(R->getValue(),
- R->getSize(),
- R->getAAInfo()))
- != AliasAnalysis::MustAlias)
+ if (AA.alias(MemoryLocation(L->getValue(), L->getSize(), L->getAAInfo()),
+ MemoryLocation(R->getValue(), R->getSize(), R->getAAInfo())) !=
+ AliasAnalysis::MustAlias)
AliasTy = MayAlias;
}
@@ -106,9 +102,8 @@ void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
if (PointerRec *P = getSomePointer()) {
AliasAnalysis &AA = AST.getAliasAnalysis();
AliasAnalysis::AliasResult Result =
- AA.alias(AliasAnalysis::Location(P->getValue(), P->getSize(),
- P->getAAInfo()),
- AliasAnalysis::Location(Entry.getValue(), Size, AAInfo));
+ AA.alias(MemoryLocation(P->getValue(), P->getSize(), P->getAAInfo()),
+ MemoryLocation(Entry.getValue(), Size, AAInfo));
if (Result != AliasAnalysis::MustAlias)
AliasTy = MayAlias;
else // First entry of must alias must have maximum size!
@@ -156,26 +151,24 @@ bool AliasSet::aliasesPointer(const Value *Ptr, uint64_t Size,
// SOME value in the set.
PointerRec *SomePtr = getSomePointer();
assert(SomePtr && "Empty must-alias set??");
- return AA.alias(AliasAnalysis::Location(SomePtr->getValue(),
- SomePtr->getSize(),
- SomePtr->getAAInfo()),
- AliasAnalysis::Location(Ptr, Size, AAInfo));
+ return AA.alias(MemoryLocation(SomePtr->getValue(), SomePtr->getSize(),
+ SomePtr->getAAInfo()),
+ MemoryLocation(Ptr, Size, AAInfo));
}
// If this is a may-alias set, we have to check all of the pointers in the set
// to be sure it doesn't alias the set...
for (iterator I = begin(), E = end(); I != E; ++I)
- if (AA.alias(AliasAnalysis::Location(Ptr, Size, AAInfo),
- AliasAnalysis::Location(I.getPointer(), I.getSize(),
- I.getAAInfo())))
+ if (AA.alias(MemoryLocation(Ptr, Size, AAInfo),
+ MemoryLocation(I.getPointer(), I.getSize(), I.getAAInfo())))
return true;
// Check the unknown instructions...
if (!UnknownInsts.empty()) {
for (unsigned i = 0, e = UnknownInsts.size(); i != e; ++i)
if (AA.getModRefInfo(UnknownInsts[i],
- AliasAnalysis::Location(Ptr, Size, AAInfo)) !=
- AliasAnalysis::NoModRef)
+ MemoryLocation(Ptr, Size, AAInfo)) !=
+ AliasAnalysis::NoModRef)
return true;
}
@@ -196,10 +189,9 @@ bool AliasSet::aliasesUnknownInst(const Instruction *Inst,
}
for (iterator I = begin(), E = end(); I != E; ++I)
- if (AA.getModRefInfo(Inst, AliasAnalysis::Location(I.getPointer(),
- I.getSize(),
- I.getAAInfo())) !=
- AliasAnalysis::NoModRef)
+ if (AA.getModRefInfo(
+ Inst, MemoryLocation(I.getPointer(), I.getSize(), I.getAAInfo())) !=
+ AliasAnalysis::NoModRef)
return true;
return false;
@@ -345,8 +337,8 @@ bool AliasSetTracker::add(VAArgInst *VAAI) {
VAAI->getAAMetadata(AAInfo);
bool NewPtr;
- addPointer(VAAI->getOperand(0), AliasAnalysis::UnknownSize,
- AAInfo, AliasSet::ModRef, NewPtr);
+ addPointer(VAAI->getOperand(0), MemoryLocation::UnknownSize, AAInfo,
+ AliasSet::ModRef, NewPtr);
return NewPtr;
}
@@ -479,7 +471,7 @@ bool AliasSetTracker::remove(VAArgInst *VAAI) {
VAAI->getAAMetadata(AAInfo);
AliasSet *AS = findAliasSetForPointer(VAAI->getOperand(0),
- AliasAnalysis::UnknownSize, AAInfo);
+ MemoryLocation::UnknownSize, AAInfo);
if (!AS) return false;
remove(*AS);
return true;
@@ -674,7 +666,7 @@ namespace {
return false;
}
};
-}
+} // namespace
char AliasSetPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(AliasSetPrinter, "print-alias-sets",
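Throughout AliasSetTracker.cpp the verbose AliasAnalysis::Location spellings collapse into the shorter MemoryLocation(Ptr, Size, AAInfo) form. The must-alias bookkeeping those calls feed is worth a sketch: because must-alias is maintained as a set-wide invariant, a new pointer only has to be compared against one representative member, as in mergeSetIn and addPointer above. Stand-in types below, not LLVM's:

#include <vector>

enum AliasResult { NoAlias, MayAlias, PartialAlias, MustAlias };
enum SetKind { SetMustAlias, SetMayAlias };

struct Rec { const void *Ptr; unsigned long long Size; };

// If every existing member must-aliases the representative, comparing the
// incoming record against that single representative decides whether the
// set can stay must-alias; any weaker answer demotes it for good.
template <typename AliasFn>
SetKind classifyAfterInsert(SetKind Current, const Rec *Representative,
                            const Rec &Incoming, AliasFn Alias) {
  if (Current == SetMayAlias || !Representative)
    return Current;
  return Alias(*Representative, Incoming) == MustAlias ? SetMustAlias
                                                       : SetMayAlias;
}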
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index a61faca2e54e..d11a748e4bf9 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -105,7 +105,7 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
uint64_t Size;
if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
return Size;
- return AliasAnalysis::UnknownSize;
+ return MemoryLocation::UnknownSize;
}
/// isObjectSmallerThan - Return true if we can prove that the object specified
@@ -146,7 +146,7 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
// reads a bit past the end given sufficient alignment.
uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/true);
- return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
+ return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}
/// isObjectSize - Return true if we can prove that the object specified
@@ -154,7 +154,7 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
static bool isObjectSize(const Value *V, uint64_t Size,
const DataLayout &DL, const TargetLibraryInfo &TLI) {
uint64_t ObjectSize = getObjectSize(V, DL, TLI);
- return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
+ return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}
//===----------------------------------------------------------------------===//
@@ -182,7 +182,7 @@ namespace {
return !operator==(Other);
}
};
-}
+} // namespace
/// GetLinearExpression - Analyze the specified value as a linear expression:
@@ -459,7 +459,8 @@ namespace {
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
- AliasResult alias(const Location &LocA, const Location &LocB) override {
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override {
assert(AliasCache.empty() && "AliasCache must be cleared after use!");
assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
"BasicAliasAnalysis doesn't support interprocedural queries.");
@@ -475,18 +476,19 @@ namespace {
}
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override;
+ const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override;
/// pointsToConstantMemory - Chase pointers until we find a (constant
/// global) or not.
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override;
/// Get the location associated with a pointer argument of a callsite.
- Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- ModRefResult &Mask) override;
+ ModRefResult getArgModRefInfo(ImmutableCallSite CS,
+ unsigned ArgIdx) override;
/// getModRefBehavior - Return the behavior when calling the given
/// call site.
@@ -508,7 +510,7 @@ namespace {
private:
// AliasCache - Track alias queries to guard against recursion.
- typedef std::pair<Location, Location> LocPair;
+ typedef std::pair<MemoryLocation, MemoryLocation> LocPair;
typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
AliasCacheTy AliasCache;
@@ -592,8 +594,8 @@ ImmutablePass *llvm::createBasicAliasAnalysisPass() {
/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
-bool
-BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
+bool BasicAliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) {
assert(Visited.empty() && "Visited must be cleared after use!");
unsigned MaxLookup = 8;
@@ -652,6 +654,8 @@ BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
return Worklist.empty();
}
+// FIXME: This code is duplicated with MemoryLocation and should be hoisted to
+// some common utility location.
static bool isMemsetPattern16(const Function *MS,
const TargetLibraryInfo &TLI) {
if (TLI.has(LibFunc::memset_pattern16) &&
@@ -715,84 +719,33 @@ BasicAliasAnalysis::getModRefBehavior(const Function *F) {
return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}
-AliasAnalysis::Location
-BasicAliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- ModRefResult &Mask) {
- Location Loc = AliasAnalysis::getArgLocation(CS, ArgIdx, Mask);
- const TargetLibraryInfo &TLI =
- getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
- const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
- if (II != nullptr)
+AliasAnalysis::ModRefResult
+BasicAliasAnalysis::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction()))
switch (II->getIntrinsicID()) {
- default: break;
+ default:
+ break;
case Intrinsic::memset:
case Intrinsic::memcpy:
- case Intrinsic::memmove: {
+ case Intrinsic::memmove:
assert((ArgIdx == 0 || ArgIdx == 1) &&
"Invalid argument index for memory intrinsic");
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
- Loc.Size = LenCI->getZExtValue();
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Memory intrinsic location pointer not argument?");
- Mask = ArgIdx ? Ref : Mod;
- break;
- }
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- case Intrinsic::invariant_start: {
- assert(ArgIdx == 1 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- Loc.Size = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
- break;
- }
- case Intrinsic::invariant_end: {
- assert(ArgIdx == 2 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- Loc.Size = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
- break;
- }
- case Intrinsic::arm_neon_vld1: {
- assert(ArgIdx == 0 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- // LLVM's vld1 and vst1 intrinsics currently only support a single
- // vector register.
- if (DL)
- Loc.Size = DL->getTypeStoreSize(II->getType());
- break;
- }
- case Intrinsic::arm_neon_vst1: {
- assert(ArgIdx == 0 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- if (DL)
- Loc.Size = DL->getTypeStoreSize(II->getArgOperand(1)->getType());
- break;
- }
+ return ArgIdx ? Ref : Mod;
}
// We can bound the aliasing properties of memset_pattern16 just as we can
// for memcpy/memset. This is particularly important because the
// LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
// whenever possible.
- else if (CS.getCalledFunction() &&
- isMemsetPattern16(CS.getCalledFunction(), TLI)) {
+ if (CS.getCalledFunction() &&
+ isMemsetPattern16(CS.getCalledFunction(), *TLI)) {
assert((ArgIdx == 0 || ArgIdx == 1) &&
"Invalid argument index for memset_pattern16");
- if (ArgIdx == 1)
- Loc.Size = 16;
- else if (const ConstantInt *LenCI =
- dyn_cast<ConstantInt>(CS.getArgument(2)))
- Loc.Size = LenCI->getZExtValue();
- assert(Loc.Ptr == CS.getArgument(ArgIdx) &&
- "memset_pattern16 location pointer not argument?");
- Mask = ArgIdx ? Ref : Mod;
+ return ArgIdx ? Ref : Mod;
}
// FIXME: Handle memset_pattern4 and memset_pattern8 also.
- return Loc;
+ return AliasAnalysis::getArgModRefInfo(CS, ArgIdx);
}
static bool isAssumeIntrinsic(ImmutableCallSite CS) {
@@ -814,7 +767,7 @@ bool BasicAliasAnalysis::doInitialization(Module &M) {
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");
@@ -850,7 +803,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
// is impossible to alias the pointer we're checking. If not, we have to
// assume that the call could touch the pointer, even though it doesn't
// escape.
- if (!isNoAlias(Location(*CI), Location(Object))) {
+ if (!isNoAlias(MemoryLocation(*CI), MemoryLocation(Object))) {
PassedAsArg = true;
break;
}
@@ -902,8 +855,8 @@ aliasSameBasePointerGEPs(const GEPOperator *GEP1, uint64_t V1Size,
// If we don't know the size of the accesses through both GEPs, we can't
// determine whether the struct fields accessed can't alias.
- if (V1Size == AliasAnalysis::UnknownSize ||
- V2Size == AliasAnalysis::UnknownSize)
+ if (V1Size == MemoryLocation::UnknownSize ||
+ V2Size == MemoryLocation::UnknownSize)
return AliasAnalysis::MayAlias;
ConstantInt *C1 =
@@ -1017,8 +970,9 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// derived pointer.
if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
// Do the base pointers alias?
- AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, AAMDNodes(),
- UnderlyingV2, UnknownSize, AAMDNodes());
+ AliasResult BaseAlias =
+ aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
+ UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());
// Check for geps of non-aliasing underlying pointers where the offsets are
// identical.
@@ -1109,11 +1063,12 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// pointer, we know they cannot alias.
// If both accesses are unknown size, we can't do anything useful here.
- if (V1Size == UnknownSize && V2Size == UnknownSize)
+ if (V1Size == MemoryLocation::UnknownSize &&
+ V2Size == MemoryLocation::UnknownSize)
return MayAlias;
- AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, AAMDNodes(),
- V2, V2Size, V2AAInfo);
+ AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
+ AAMDNodes(), V2, V2Size, V2AAInfo);
if (R != MustAlias)
// If V2 may alias GEP base pointer, conservatively returns MayAlias.
// If V2 is known not to alias GEP base pointer, then the two values
@@ -1153,7 +1108,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// greater, we know they do not overlap.
if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
if (GEP1BaseOffset >= 0) {
- if (V2Size != UnknownSize) {
+ if (V2Size != MemoryLocation::UnknownSize) {
if ((uint64_t)GEP1BaseOffset < V2Size)
return PartialAlias;
return NoAlias;
@@ -1167,7 +1122,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// GEP1 V2
// We need to know that V2Size is not unknown, otherwise we might have
    // stripped a gep with negative index ('gep <ptr>, -1, ...').
- if (V1Size != UnknownSize && V2Size != UnknownSize) {
+ if (V1Size != MemoryLocation::UnknownSize &&
+ V2Size != MemoryLocation::UnknownSize) {
if (-(uint64_t)GEP1BaseOffset < V1Size)
return PartialAlias;
return NoAlias;
@@ -1218,8 +1174,9 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// mod Modulo. Check whether that difference guarantees that the
// two locations do not alias.
uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
- if (V1Size != UnknownSize && V2Size != UnknownSize &&
- ModOffset >= V2Size && V1Size <= Modulo - ModOffset)
+ if (V1Size != MemoryLocation::UnknownSize &&
+ V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
+ V1Size <= Modulo - ModOffset)
return NoAlias;
// If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
@@ -1302,8 +1259,8 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
// on corresponding edges.
if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
if (PN2->getParent() == PN->getParent()) {
- LocPair Locs(Location(PN, PNSize, PNAAInfo),
- Location(V2, V2Size, V2AAInfo));
+ LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
+ MemoryLocation(V2, V2Size, V2AAInfo));
if (PN > V2)
std::swap(Locs.first, Locs.second);
// Analyse the PHIs' inputs under the assumption that the PHIs are
@@ -1457,14 +1414,16 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
if (DL)
- if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
- (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
+ if ((V1Size != MemoryLocation::UnknownSize &&
+ isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
+ (V2Size != MemoryLocation::UnknownSize &&
+ isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates
// otherwise infinitely recursive queries.
- LocPair Locs(Location(V1, V1Size, V1AAInfo),
- Location(V2, V2Size, V2AAInfo));
+ LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
+ MemoryLocation(V2, V2Size, V2AAInfo));
if (V1 > V2)
std::swap(Locs.first, Locs.second);
std::pair<AliasCacheTy::iterator, bool> Pair =
@@ -1511,13 +1470,15 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// accesses is accessing the entire object, then the accesses must
// overlap in some way.
if (DL && O1 == O2)
- if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *DL, *TLI)) ||
- (V2Size != UnknownSize && isObjectSize(O2, V2Size, *DL, *TLI)))
+ if ((V1Size != MemoryLocation::UnknownSize &&
+ isObjectSize(O1, V1Size, *DL, *TLI)) ||
+ (V2Size != MemoryLocation::UnknownSize &&
+ isObjectSize(O2, V2Size, *DL, *TLI)))
return AliasCache[Locs] = PartialAlias;
AliasResult Result =
- AliasAnalysis::alias(Location(V1, V1Size, V1AAInfo),
- Location(V2, V2Size, V2AAInfo));
+ AliasAnalysis::alias(MemoryLocation(V1, V1Size, V1AAInfo),
+ MemoryLocation(V2, V2Size, V2AAInfo));
return AliasCache[Locs] = Result;
}
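After the rewrite, BasicAA's per-argument answer for memcpy, memmove, memset, and memset_pattern16 is reduced to a direction bit: argument 0 (the destination) may be modified, argument 1 (the source or pattern) is only read, and the size bookkeeping moves into MemoryLocation::getForArgument. The same rule, stated as a compilable two-liner with a stand-in enum:

#include <cassert>

enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = 3 };

// memcpy/memmove/memset_pattern16: dest (arg 0) is written, src (arg 1) read.
ModRefResult memTransferArgModRef(unsigned ArgIdx) { return ArgIdx ? Ref : Mod; }

int main() {
  assert(memTransferArgModRef(0) == Mod); // destination operand
  assert(memTransferArgModRef(1) == Ref); // source/pattern operand
  return 0;
}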
diff --git a/lib/Analysis/BlockFrequencyInfoImpl.cpp b/lib/Analysis/BlockFrequencyInfoImpl.cpp
index 456cee179f0b..daa77b81d6b3 100644
--- a/lib/Analysis/BlockFrequencyInfoImpl.cpp
+++ b/lib/Analysis/BlockFrequencyInfoImpl.cpp
@@ -286,7 +286,7 @@ bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
if (isLoopHeader(Resolved)) {
DEBUG(debugSuccessor("backedge"));
- Dist.addBackedge(OuterLoop->getHeader(), Weight);
+ Dist.addBackedge(Resolved, Weight);
return true;
}
@@ -349,7 +349,10 @@ void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
// LoopScale == 1 / ExitMass
// ExitMass == HeadMass - BackedgeMass
- BlockMass ExitMass = BlockMass::getFull() - Loop.BackedgeMass;
+ BlockMass TotalBackedgeMass;
+ for (auto &Mass : Loop.BackedgeMass)
+ TotalBackedgeMass += Mass;
+ BlockMass ExitMass = BlockMass::getFull() - TotalBackedgeMass;
// Block scale stores the inverse of the scale. If this is an infinite loop,
// its exit mass will be zero. In this case, use an arbitrary scale for the
@@ -358,7 +361,7 @@ void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
ExitMass.isEmpty() ? InifiniteLoopScale : ExitMass.toScaled().inverse();
DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull()
- << " - " << Loop.BackedgeMass << ")\n"
+ << " - " << TotalBackedgeMass << ")\n"
<< " - scale = " << Loop.Scale << "\n");
}
@@ -375,6 +378,19 @@ void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) {
Loop.IsPackaged = true;
}
+#ifndef NDEBUG
+static void debugAssign(const BlockFrequencyInfoImplBase &BFI,
+ const DitheringDistributer &D, const BlockNode &T,
+ const BlockMass &M, const char *Desc) {
+ dbgs() << " => assign " << M << " (" << D.RemMass << ")";
+ if (Desc)
+ dbgs() << " [" << Desc << "]";
+ if (T.isValid())
+ dbgs() << " to " << BFI.getBlockName(T);
+ dbgs() << "\n";
+}
+#endif
+
void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
LoopData *OuterLoop,
Distribution &Dist) {
@@ -384,25 +400,12 @@ void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
// Distribute mass to successors as laid out in Dist.
DitheringDistributer D(Dist, Mass);
-#ifndef NDEBUG
- auto debugAssign = [&](const BlockNode &T, const BlockMass &M,
- const char *Desc) {
- dbgs() << " => assign " << M << " (" << D.RemMass << ")";
- if (Desc)
- dbgs() << " [" << Desc << "]";
- if (T.isValid())
- dbgs() << " to " << getBlockName(T);
- dbgs() << "\n";
- };
- (void)debugAssign;
-#endif
-
for (const Weight &W : Dist.Weights) {
// Check for a local edge (non-backedge and non-exit).
BlockMass Taken = D.takeMass(W.Amount);
if (W.Type == Weight::Local) {
Working[W.TargetNode.Index].getMass() += Taken;
- DEBUG(debugAssign(W.TargetNode, Taken, nullptr));
+ DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
continue;
}
@@ -411,15 +414,15 @@ void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
// Check for a backedge.
if (W.Type == Weight::Backedge) {
- OuterLoop->BackedgeMass += Taken;
- DEBUG(debugAssign(BlockNode(), Taken, "back"));
+ OuterLoop->BackedgeMass[OuterLoop->getHeaderIndex(W.TargetNode)] += Taken;
+ DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "back"));
continue;
}
// This must be an exit.
assert(W.Type == Weight::Exit);
OuterLoop->Exits.push_back(std::make_pair(W.TargetNode, Taken));
- DEBUG(debugAssign(W.TargetNode, Taken, "exit"));
+ DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "exit"));
}
}
@@ -595,7 +598,7 @@ template <> struct GraphTraits<IrreducibleGraph> {
static ChildIteratorType child_begin(NodeType *N) { return N->succ_begin(); }
static ChildIteratorType child_end(NodeType *N) { return N->succ_end(); }
};
-}
+} // namespace llvm
/// \brief Find extra irreducible headers.
///
@@ -713,10 +716,44 @@ BlockFrequencyInfoImplBase::analyzeIrreducible(
void
BlockFrequencyInfoImplBase::updateLoopWithIrreducible(LoopData &OuterLoop) {
OuterLoop.Exits.clear();
- OuterLoop.BackedgeMass = BlockMass::getEmpty();
+ for (auto &Mass : OuterLoop.BackedgeMass)
+ Mass = BlockMass::getEmpty();
auto O = OuterLoop.Nodes.begin() + 1;
for (auto I = O, E = OuterLoop.Nodes.end(); I != E; ++I)
if (!Working[I->Index].isPackaged())
*O++ = *I;
OuterLoop.Nodes.erase(O, OuterLoop.Nodes.end());
}
+
+void BlockFrequencyInfoImplBase::adjustLoopHeaderMass(LoopData &Loop) {
+ assert(Loop.isIrreducible() && "this only makes sense on irreducible loops");
+
+ // Since the loop has more than one header block, the mass flowing back into
+ // each header will be different. Adjust the mass in each header loop to
+ // reflect the masses flowing through back edges.
+ //
+ // To do this, we distribute the initial mass using the backedge masses
+ // as weights for the distribution.
+ BlockMass LoopMass = BlockMass::getFull();
+ Distribution Dist;
+
+ DEBUG(dbgs() << "adjust-loop-header-mass:\n");
+ for (uint32_t H = 0; H < Loop.NumHeaders; ++H) {
+ auto &HeaderNode = Loop.Nodes[H];
+ auto &BackedgeMass = Loop.BackedgeMass[Loop.getHeaderIndex(HeaderNode)];
+ DEBUG(dbgs() << " - Add back edge mass for node "
+ << getBlockName(HeaderNode) << ": " << BackedgeMass << "\n");
+ Dist.addLocal(HeaderNode, BackedgeMass.getMass());
+ }
+
+ DitheringDistributer D(Dist, LoopMass);
+
+ DEBUG(dbgs() << " Distribute loop mass " << LoopMass
+ << " to headers using above weights\n");
+ for (const Weight &W : Dist.Weights) {
+ BlockMass Taken = D.takeMass(W.Amount);
+ assert(W.Type == Weight::Local && "all weights should be local");
+ Working[W.TargetNode.Index].getMass() = Taken;
+ DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr));
+ }
+}
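The new adjustLoopHeaderMass splits the full loop mass across the headers of an irreducible loop in proportion to the backedge mass each header received, and computeLoopScale now sums the per-header vector instead of reading a single field. BlockMass and Distribution do this in fixed point with dithering; plain doubles are enough to show the proportions, so the sketch below is a model of the idea, not the real arithmetic:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  // Backedge mass recorded per header while distributing the loop body.
  std::vector<double> BackedgeMass = {0.6, 0.3, 0.1};
  double Total = 0;
  for (double M : BackedgeMass)
    Total += M; // the TotalBackedgeMass summation in computeLoopScale

  const double LoopMass = 1.0; // BlockMass::getFull()
  for (std::size_t H = 0; H < BackedgeMass.size(); ++H)
    std::printf("header %zu starts with mass %.2f\n", H,
                LoopMass * BackedgeMass[H] / Total); // 0.60 / 0.30 / 0.10
  return 0;
}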
diff --git a/lib/Analysis/CFGPrinter.cpp b/lib/Analysis/CFGPrinter.cpp
index c86f1f55954b..edd02c2fa0b2 100644
--- a/lib/Analysis/CFGPrinter.cpp
+++ b/lib/Analysis/CFGPrinter.cpp
@@ -40,7 +40,7 @@ namespace {
AU.setPreservesAll();
}
};
-}
+} // namespace
char CFGViewer::ID = 0;
INITIALIZE_PASS(CFGViewer, "view-cfg", "View CFG of function", false, true)
@@ -63,7 +63,7 @@ namespace {
AU.setPreservesAll();
}
};
-}
+} // namespace
char CFGOnlyViewer::ID = 0;
INITIALIZE_PASS(CFGOnlyViewer, "view-cfg-only",
@@ -97,7 +97,7 @@ namespace {
AU.setPreservesAll();
}
};
-}
+} // namespace
char CFGPrinter::ID = 0;
INITIALIZE_PASS(CFGPrinter, "dot-cfg", "Print CFG of function to 'dot' file",
@@ -130,7 +130,7 @@ namespace {
AU.setPreservesAll();
}
};
-}
+} // namespace
char CFGOnlyPrinter::ID = 0;
INITIALIZE_PASS(CFGOnlyPrinter, "dot-cfg-only",
diff --git a/lib/Analysis/CFLAliasAnalysis.cpp b/lib/Analysis/CFLAliasAnalysis.cpp
index 84b31dff055a..d937c0b2198a 100644
--- a/lib/Analysis/CFLAliasAnalysis.cpp
+++ b/lib/Analysis/CFLAliasAnalysis.cpp
@@ -14,8 +14,7 @@
// Alias Analysis" by Zhang Q, Lyu M R, Yuan H, and Su Z. -- to summarize the
// papers, we build a graph of the uses of a variable, where each node is a
// memory location, and each edge is an action that happened on that memory
-// location. The "actions" can be one of Dereference, Reference, Assign, or
-// Assign.
+// location. The "actions" can be one of Dereference, Reference, or Assign.
//
// Two variables are considered as aliasing iff you can reach one value's node
// from the other value's node and the language formed by concatenating all of
@@ -219,9 +218,10 @@ public:
return Iter->second;
}
- AliasResult query(const Location &LocA, const Location &LocB);
+ AliasResult query(const MemoryLocation &LocA, const MemoryLocation &LocB);
- AliasResult alias(const Location &LocA, const Location &LocB) override {
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override {
if (LocA.Ptr == LocB.Ptr) {
if (LocA.Size == LocB.Size) {
return MustAlias;
@@ -539,6 +539,19 @@ public:
Output.push_back(Edge(&Inst, From1, EdgeType::Assign, AttrNone));
Output.push_back(Edge(&Inst, From2, EdgeType::Assign, AttrNone));
}
+
+ void visitConstantExpr(ConstantExpr *CE) {
+ switch (CE->getOpcode()) {
+ default:
+ llvm_unreachable("Unknown instruction type encountered!");
+// Build the switch statement using the Instruction.def file.
+#define HANDLE_INST(NUM, OPCODE, CLASS) \
+ case Instruction::OPCODE: \
+ visit##OPCODE(*(CLASS *)CE); \
+ break;
+#include "llvm/IR/Instruction.def"
+ }
+ }
};
// For a given instruction, we need to know which Value* to get the
@@ -712,7 +725,7 @@ public:
typedef WeightedBidirectionalGraph<std::pair<EdgeType, StratifiedAttrs>> GraphT;
typedef DenseMap<Value *, GraphT::Node> NodeMapT;
-}
+} // namespace
// -- Setting up/registering CFLAA pass -- //
char CFLAliasAnalysis::ID = 0;
@@ -741,6 +754,10 @@ static EdgeType flipWeight(EdgeType);
static void argsToEdges(CFLAliasAnalysis &, Instruction *,
SmallVectorImpl<Edge> &);
+// Gets edges of the given ConstantExpr*, writing them to the SmallVector*.
+static void argsToEdges(CFLAliasAnalysis &, ConstantExpr *,
+ SmallVectorImpl<Edge> &);
+
// Gets the "Level" that one should travel in StratifiedSets
// given an EdgeType.
static Level directionOfEdgeType(EdgeType);
@@ -807,6 +824,13 @@ static bool hasUsefulEdges(Instruction *Inst) {
return !isa<CmpInst>(Inst) && !isa<FenceInst>(Inst) && !IsNonInvokeTerminator;
}
+static bool hasUsefulEdges(ConstantExpr *CE) {
+  // ConstantExpr doesn't have terminators, invokes, or fences, so it only
+  // needs to check for compares.
+ return CE->getOpcode() != Instruction::ICmp &&
+ CE->getOpcode() != Instruction::FCmp;
+}
+
static Optional<StratifiedAttr> valueToAttrIndex(Value *Val) {
if (isa<GlobalValue>(Val))
return AttrGlobalIndex;
@@ -846,6 +870,13 @@ static void argsToEdges(CFLAliasAnalysis &Analysis, Instruction *Inst,
v.visit(Inst);
}
+static void argsToEdges(CFLAliasAnalysis &Analysis, ConstantExpr *CE,
+ SmallVectorImpl<Edge> &Output) {
+ assert(hasUsefulEdges(CE) && "Expected constant expr to have 'useful' edges");
+ GetEdgesVisitor v(Analysis, Output);
+ v.visitConstantExpr(CE);
+}
+
static Level directionOfEdgeType(EdgeType Weight) {
switch (Weight) {
case EdgeType::Reference:
@@ -865,25 +896,23 @@ static void constexprToEdges(CFLAliasAnalysis &Analysis,
Worklist.push_back(&CExprToCollapse);
SmallVector<Edge, 8> ConstexprEdges;
+ SmallPtrSet<ConstantExpr *, 4> Visited;
while (!Worklist.empty()) {
auto *CExpr = Worklist.pop_back_val();
- std::unique_ptr<Instruction> Inst(CExpr->getAsInstruction());
- if (!hasUsefulEdges(Inst.get()))
+ if (!hasUsefulEdges(CExpr))
continue;
ConstexprEdges.clear();
- argsToEdges(Analysis, Inst.get(), ConstexprEdges);
+ argsToEdges(Analysis, CExpr, ConstexprEdges);
for (auto &Edge : ConstexprEdges) {
- if (Edge.From == Inst.get())
- Edge.From = CExpr;
- else if (auto *Nested = dyn_cast<ConstantExpr>(Edge.From))
- Worklist.push_back(Nested);
-
- if (Edge.To == Inst.get())
- Edge.To = CExpr;
- else if (auto *Nested = dyn_cast<ConstantExpr>(Edge.To))
- Worklist.push_back(Nested);
+ if (auto *Nested = dyn_cast<ConstantExpr>(Edge.From))
+ if (Visited.insert(Nested).second)
+ Worklist.push_back(Nested);
+
+ if (auto *Nested = dyn_cast<ConstantExpr>(Edge.To))
+ if (Visited.insert(Nested).second)
+ Worklist.push_back(Nested);
}
Results.append(ConstexprEdges.begin(), ConstexprEdges.end());
@@ -1080,9 +1109,8 @@ void CFLAliasAnalysis::scan(Function *Fn) {
Handles.push_front(FunctionHandle(Fn, this));
}
-AliasAnalysis::AliasResult
-CFLAliasAnalysis::query(const AliasAnalysis::Location &LocA,
- const AliasAnalysis::Location &LocB) {
+AliasAnalysis::AliasResult CFLAliasAnalysis::query(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
auto *ValA = const_cast<Value *>(LocA.Ptr);
auto *ValB = const_cast<Value *>(LocB.Ptr);
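The reworked constexprToEdges walks nested ConstantExprs directly instead of materializing a temporary Instruction, and the added Visited set guarantees that a sub-expression shared by several parents is expanded only once. The traversal pattern, reduced to standard C++ with Expr standing in for ConstantExpr:

#include <cstdio>
#include <unordered_set>
#include <vector>

struct Expr { std::vector<Expr *> Operands; }; // stand-in for ConstantExpr

void collapse(Expr &Root) {
  std::vector<Expr *> Worklist = {&Root};
  std::unordered_set<Expr *> Visited;
  while (!Worklist.empty()) {
    Expr *E = Worklist.back();
    Worklist.pop_back();
    std::printf("collecting edges for %p\n", static_cast<void *>(E));
    for (Expr *Op : E->Operands)
      if (Visited.insert(Op).second) // each nested expr enqueued at most once
        Worklist.push_back(Op);
  }
}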
diff --git a/lib/Analysis/CaptureTracking.cpp b/lib/Analysis/CaptureTracking.cpp
index 5a5475417951..92f6932bf8b9 100644
--- a/lib/Analysis/CaptureTracking.cpp
+++ b/lib/Analysis/CaptureTracking.cpp
@@ -110,7 +110,7 @@ namespace {
bool Captured;
};
-}
+} // namespace
/// PointerMayBeCaptured - Return true if this pointer value may be captured
/// by the enclosing function (which is required to exist). This routine can
diff --git a/lib/Analysis/DivergenceAnalysis.cpp b/lib/Analysis/DivergenceAnalysis.cpp
index e5ee2959c15d..3765adf4d98c 100644
--- a/lib/Analysis/DivergenceAnalysis.cpp
+++ b/lib/Analysis/DivergenceAnalysis.cpp
@@ -284,7 +284,7 @@ void DivergencePropagator::propagate() {
}
}
-} /// end namespace anonymous
+} // namespace
FunctionPass *llvm::createDivergenceAnalysisPass() {
return new DivergenceAnalysis();
diff --git a/lib/Analysis/DomPrinter.cpp b/lib/Analysis/DomPrinter.cpp
index 0c880df54f8e..0e0d174c2a48 100644
--- a/lib/Analysis/DomPrinter.cpp
+++ b/lib/Analysis/DomPrinter.cpp
@@ -78,7 +78,7 @@ struct DOTGraphTraits<PostDominatorTree*>
return DOTGraphTraits<DomTreeNode*>::getNodeLabel(Node, G->getRootNode());
}
};
-}
+} // namespace llvm
namespace {
struct DominatorTreeWrapperPassAnalysisGraphTraits {
diff --git a/lib/Analysis/IPA/CallGraph.cpp b/lib/Analysis/IPA/CallGraph.cpp
index 67cf7f86e072..e2799d965a7d 100644
--- a/lib/Analysis/IPA/CallGraph.cpp
+++ b/lib/Analysis/IPA/CallGraph.cpp
@@ -24,8 +24,8 @@ CallGraph::CallGraph(Module &M)
: M(M), Root(nullptr), ExternalCallingNode(getOrInsertFunction(nullptr)),
CallsExternalNode(new CallGraphNode(nullptr)) {
// Add every function to the call graph.
- for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
- addToCallGraph(I);
+ for (Function &F : M)
+ addToCallGraph(&F);
// If we didn't find a main function, use the external call graph node
if (!Root)
@@ -40,13 +40,11 @@ CallGraph::~CallGraph() {
// Reset all node's use counts to zero before deleting them to prevent an
// assertion from firing.
#ifndef NDEBUG
- for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
- I != E; ++I)
- I->second->allReferencesDropped();
+ for (auto &I : FunctionMap)
+ I.second->allReferencesDropped();
#endif
- for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
- I != E; ++I)
- delete I->second;
+ for (auto &I : FunctionMap)
+ delete I.second;
}
void CallGraph::addToCallGraph(Function *F) {
@@ -81,8 +79,10 @@ void CallGraph::addToCallGraph(Function *F) {
CallSite CS(cast<Value>(II));
if (CS) {
const Function *Callee = CS.getCalledFunction();
- if (!Callee)
+ if (!Callee || !Intrinsic::isLeaf(Callee->getIntrinsicID()))
// Indirect calls of intrinsics are not allowed so no need to check.
+ // We can be more precise here by using TargetArg returned by
+ // Intrinsic::isLeaf.
Node->addCalledFunction(CS, CallsExternalNode);
else if (!Callee->isIntrinsic())
Node->addCalledFunction(CS, getOrInsertFunction(Callee));
@@ -98,8 +98,26 @@ void CallGraph::print(raw_ostream &OS) const {
OS << "<<null function: 0x" << Root << ">>\n";
}
- for (CallGraph::const_iterator I = begin(), E = end(); I != E; ++I)
- I->second->print(OS);
+ // Print in a deterministic order by sorting CallGraphNodes by name. We do
+ // this here to avoid slowing down the non-printing fast path.
+
+ SmallVector<CallGraphNode *, 16> Nodes;
+ Nodes.reserve(FunctionMap.size());
+
+ for (auto I = begin(), E = end(); I != E; ++I)
+ Nodes.push_back(I->second);
+
+ std::sort(Nodes.begin(), Nodes.end(),
+ [](CallGraphNode *LHS, CallGraphNode *RHS) {
+ if (Function *LF = LHS->getFunction())
+ if (Function *RF = RHS->getFunction())
+ return LF->getName() < RF->getName();
+
+ return RHS->getFunction() != nullptr;
+ });
+
+ for (CallGraphNode *CN : Nodes)
+ CN->print(OS);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
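CallGraph::print previously emitted nodes in the FunctionMap's pointer-keyed iteration order, which varies between runs; the added sort pins the output down. The comparator is easy to mis-read, so here it is modeled against plain strings, where an empty string plays the role of a node whose getFunction() is null:

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Nodes = {"zap", "", "bar", "foo"};
  std::sort(Nodes.begin(), Nodes.end(),
            [](const std::string &L, const std::string &R) {
              if (!L.empty() && !R.empty())
                return L < R;    // both named: alphabetical
              return !R.empty(); // function-less nodes sort to the front
            });
  for (const std::string &N : Nodes)
    std::printf("%s\n", N.empty() ? "<<null function>>" : N.c_str());
  // Prints <<null function>>, then bar, foo, zap.
  return 0;
}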
diff --git a/lib/Analysis/IPA/CallGraphSCCPass.cpp b/lib/Analysis/IPA/CallGraphSCCPass.cpp
index 65ba1c7c6c47..6b3e06346269 100644
--- a/lib/Analysis/IPA/CallGraphSCCPass.cpp
+++ b/lib/Analysis/IPA/CallGraphSCCPass.cpp
@@ -217,8 +217,10 @@ bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
// another value. This can happen when constant folding happens
// of well known functions etc.
!CallSite(I->first) ||
- (CallSite(I->first).getCalledFunction() &&
- CallSite(I->first).getCalledFunction()->isIntrinsic())) {
+ (CallSite(I->first).getCalledFunction() &&
+ CallSite(I->first).getCalledFunction()->isIntrinsic() &&
+ Intrinsic::isLeaf(
+ CallSite(I->first).getCalledFunction()->getIntrinsicID()))) {
assert(!CheckingMode &&
"CallGraphSCCPass did not update the CallGraph correctly!");
diff --git a/lib/Analysis/IPA/CallPrinter.cpp b/lib/Analysis/IPA/CallPrinter.cpp
index 68dcd3c06427..f183625dd776 100644
--- a/lib/Analysis/IPA/CallPrinter.cpp
+++ b/lib/Analysis/IPA/CallPrinter.cpp
@@ -41,7 +41,7 @@ struct AnalysisCallGraphWrapperPassTraits {
}
};
-} // end llvm namespace
+} // namespace llvm
namespace {
diff --git a/lib/Analysis/IPA/GlobalsModRef.cpp b/lib/Analysis/IPA/GlobalsModRef.cpp
index 018ae99d6618..a32631d0c3b2 100644
--- a/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -115,9 +115,10 @@ namespace {
//------------------------------------------------
// Implement the AliasAnalysis API
//
- AliasResult alias(const Location &LocA, const Location &LocB) override;
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override;
+ const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override {
return AliasAnalysis::getModRefInfo(CS1, CS2);
@@ -188,7 +189,7 @@ namespace {
GlobalValue *OkayStoreDest = nullptr);
bool AnalyzeIndirectGlobalMemory(GlobalValue *GV);
};
-}
+} // namespace
char GlobalsModRef::ID = 0;
INITIALIZE_AG_PASS_BEGIN(GlobalsModRef, AliasAnalysis,
@@ -478,9 +479,8 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
/// alias - If one of the pointers is to a global that we are tracking, and the
/// other is some random pointer, we know there cannot be an alias, because the
/// address of the global isn't taken.
-AliasAnalysis::AliasResult
-GlobalsModRef::alias(const Location &LocA,
- const Location &LocB) {
+AliasAnalysis::AliasResult GlobalsModRef::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
// Get the base object these pointers point to.
const Value *UV1 = GetUnderlyingObject(LocA.Ptr, *DL);
const Value *UV2 = GetUnderlyingObject(LocB.Ptr, *DL);
@@ -535,8 +535,7 @@ GlobalsModRef::alias(const Location &LocA,
}
AliasAnalysis::ModRefResult
-GlobalsModRef::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+GlobalsModRef::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
unsigned Known = ModRef;
// If we are asking for mod/ref info of a direct call with a pointer to a
diff --git a/lib/Analysis/InstCount.cpp b/lib/Analysis/InstCount.cpp
index de2b9c0c56db..e76d26e8530b 100644
--- a/lib/Analysis/InstCount.cpp
+++ b/lib/Analysis/InstCount.cpp
@@ -64,7 +64,7 @@ namespace {
void print(raw_ostream &O, const Module *M) const override {}
};
-}
+} // namespace
char InstCount::ID = 0;
INITIALIZE_PASS(InstCount, "instcount",
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index ec56d888dc2f..12e406bb1a2d 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -854,8 +854,8 @@ static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
return X;
}
- // fsub nnan ninf x, x ==> 0.0
- if (FMF.noNaNs() && FMF.noInfs() && Op0 == Op1)
+ // fsub nnan x, x ==> 0.0
+ if (FMF.noNaNs() && Op0 == Op1)
return Constant::getNullValue(Op0->getType());
return nullptr;
@@ -1126,6 +1126,21 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
return Op0;
+ if (FMF.noNaNs()) {
+ // X / X -> 1.0 is legal when NaNs are ignored.
+ if (Op0 == Op1)
+ return ConstantFP::get(Op0->getType(), 1.0);
+
+ // -X / X -> -1.0 and
+ // X / -X -> -1.0 are legal when NaNs are ignored.
+ // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
+ if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
+ BinaryOperator::getFNegArgument(Op0) == Op1) ||
+ (BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
+ BinaryOperator::getFNegArgument(Op1) == Op0))
+ return ConstantFP::get(Op0->getType(), -1.0);
+ }
+
return nullptr;
}
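Both InstructionSimplify changes lean on the nnan flag: x - x and x / x misbehave only when x is NaN, infinite, or (for the division) zero, and every problematic case produces NaN, which nnan lets the simplifier assume away. That is also why the fsub fold could drop its ninf requirement -- inf - inf yields NaN, not a stray infinity. A quick IEEE check in plain C++:

#include <cstdio>
#include <limits>

int main() {
  const double Inf = std::numeric_limits<double>::infinity();
  const double NaN = std::numeric_limits<double>::quiet_NaN();
  for (double X : {2.5, 0.0, Inf, NaN}) {
    std::printf("x = %g: x - x = %g, x / x = %g\n", X, X - X, X / X);
    // x - x is NaN only for inf and NaN inputs; x / x is additionally NaN
    // at zero -- all outputs the nnan flag licenses the folds to ignore.
  }
  return 0;
}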
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index e6f586ac7029..f421d286e842 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -286,7 +286,7 @@ raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) {
<< Val.getConstantRange().getUpper() << '>';
return OS << "constant<" << *Val.getConstant() << '>';
}
-}
+} // namespace llvm
//===----------------------------------------------------------------------===//
// LazyValueInfoCache Decl
@@ -306,7 +306,7 @@ namespace {
deleted();
}
};
-}
+} // namespace
namespace {
/// This is the cache kept by LazyValueInfo which
@@ -1262,8 +1262,40 @@ LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
Instruction *CxtI) {
const DataLayout &DL = CxtI->getModule()->getDataLayout();
LVILatticeVal Result = getCache(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
-
- return getPredicateResult(Pred, C, Result, DL, TLI);
+ Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
+ if (Ret != Unknown)
+ return Ret;
+
+ // TODO: Move this logic inside getValueAt so that it can be cached rather
+ // than re-queried on each call. This would also allow us to merge the
+ // underlying lattice values to get more information
+ if (CxtI) {
+ // For a comparison where the V is outside this block, it's possible
+    // For a comparison where V is defined outside this block, it's possible
+ // on all incoming edges.
+ BasicBlock *BB = CxtI->getParent();
+ pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
+ if (PI != PE &&
+ (!isa<Instruction>(V) ||
+ cast<Instruction>(V)->getParent() != BB)) {
+ // For predecessor edge, determine if the comparison is true or false
+ // on that edge. If they're all true or all false, we can conclude
+ // the value of the comparison in this block.
+ Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
+ if (Baseline != Unknown) {
+ // Check that all remaining incoming values match the first one.
+ while (++PI != PE) {
+ Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
+ if (Ret != Baseline) break;
+ }
+ // If we terminated early, then one of the values didn't match.
+ if (PI == PE) {
+ return Baseline;
+ }
+ }
+ }
+ }
+ return Unknown;
}
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
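
A hedged IR sketch of the case the new getPredicateAt fallback handles: the query value is branched on in every predecessor, so the comparison is known on all incoming edges even though no single branch dominates the query block. All names are illustrative:

  define i1 @f(i32 %v, i1 %sel) {
  entry:
    %c = icmp ne i32 %v, 0
    br i1 %sel, label %a, label %b
  a:
    br i1 %c, label %m, label %out
  b:
    br i1 %c, label %m, label %out
  m:                              ; both edges into %m imply %c is true
    %t = icmp ne i32 %v, 0        ; getPredicateAt can now answer True
    ret i1 %t
  out:
    ret i1 false
  }

The loop in the patch mirrors this: a Baseline result is taken from the first incoming edge, and the query succeeds only if every remaining edge agrees with it.
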
diff --git a/lib/Analysis/LibCallAliasAnalysis.cpp b/lib/Analysis/LibCallAliasAnalysis.cpp
index f6025e3252e3..991a0e3e2752 100644
--- a/lib/Analysis/LibCallAliasAnalysis.cpp
+++ b/lib/Analysis/LibCallAliasAnalysis.cpp
@@ -48,7 +48,7 @@ bool LibCallAliasAnalysis::runOnFunction(Function &F) {
AliasAnalysis::ModRefResult
LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
ImmutableCallSite CS,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
// If we have a function, check to see what kind of mod/ref effects it
// has. Start by including any info globally known about the function.
AliasAnalysis::ModRefResult MRInfo = FI->UniversalBehavior;
@@ -122,7 +122,7 @@ LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
//
AliasAnalysis::ModRefResult
LibCallAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
ModRefResult MRInfo = ModRef;
// If this is a direct call to a function that LCI knows about, get the
diff --git a/lib/Analysis/LibCallSemantics.cpp b/lib/Analysis/LibCallSemantics.cpp
index e98540ba7e90..003c81e87b60 100644
--- a/lib/Analysis/LibCallSemantics.cpp
+++ b/lib/Analysis/LibCallSemantics.cpp
@@ -80,9 +80,8 @@ EHPersonality llvm::classifyEHPersonality(const Value *Pers) {
.Default(EHPersonality::Unknown);
}
-bool llvm::canSimplifyInvokeNoUnwind(const InvokeInst *II) {
- const LandingPadInst *LP = II->getLandingPadInst();
- EHPersonality Personality = classifyEHPersonality(LP->getPersonalityFn());
+bool llvm::canSimplifyInvokeNoUnwind(const Function *F) {
+ EHPersonality Personality = classifyEHPersonality(F->getPersonalityFn());
// We can't simplify any invokes to nounwind functions if the personality
// function wants to catch asynchronous exceptions. The nounwind attribute only
// implies that the function does not throw synchronous exceptions.
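
The invoke-to-call simplification this predicate guards is unchanged; only where the personality is read from moves. An illustrative IR shape, assuming a personality that does not catch asynchronous exceptions and a nounwind callee @g:

  invoke void @g() to label %cont unwind label %lpad
  ; can be rewritten as:
  call void @g()
  br label %cont
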
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 65a90d7bcd87..6ea6ccbfbe99 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -157,7 +157,7 @@ namespace {
WriteValues({V1, Vs...});
}
};
-}
+} // namespace
char Lint::ID = 0;
INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
@@ -202,8 +202,8 @@ void Lint::visitCallSite(CallSite CS) {
Value *Callee = CS.getCalledValue();
const DataLayout &DL = CS->getModule()->getDataLayout();
- visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Callee);
+ visitMemoryReference(I, Callee, MemoryLocation::UnknownSize, 0, nullptr,
+ MemRef::Callee);
if (Function *F = dyn_cast<Function>(findValue(Callee, DL,
/*OffsetOk=*/false))) {
@@ -282,12 +282,10 @@ void Lint::visitCallSite(CallSite CS) {
case Intrinsic::memcpy: {
MemCpyInst *MCI = cast<MemCpyInst>(&I);
// TODO: If the size is known, use it.
- visitMemoryReference(I, MCI->getDest(), AliasAnalysis::UnknownSize,
- MCI->getAlignment(), nullptr,
- MemRef::Write);
- visitMemoryReference(I, MCI->getSource(), AliasAnalysis::UnknownSize,
- MCI->getAlignment(), nullptr,
- MemRef::Read);
+ visitMemoryReference(I, MCI->getDest(), MemoryLocation::UnknownSize,
+ MCI->getAlignment(), nullptr, MemRef::Write);
+ visitMemoryReference(I, MCI->getSource(), MemoryLocation::UnknownSize,
+ MCI->getAlignment(), nullptr, MemRef::Read);
// Check that the memcpy arguments don't overlap. The AliasAnalysis API
// isn't expressive enough for what we really want to do. Known partial
@@ -306,20 +304,17 @@ void Lint::visitCallSite(CallSite CS) {
case Intrinsic::memmove: {
MemMoveInst *MMI = cast<MemMoveInst>(&I);
// TODO: If the size is known, use it.
- visitMemoryReference(I, MMI->getDest(), AliasAnalysis::UnknownSize,
- MMI->getAlignment(), nullptr,
- MemRef::Write);
- visitMemoryReference(I, MMI->getSource(), AliasAnalysis::UnknownSize,
- MMI->getAlignment(), nullptr,
- MemRef::Read);
+ visitMemoryReference(I, MMI->getDest(), MemoryLocation::UnknownSize,
+ MMI->getAlignment(), nullptr, MemRef::Write);
+ visitMemoryReference(I, MMI->getSource(), MemoryLocation::UnknownSize,
+ MMI->getAlignment(), nullptr, MemRef::Read);
break;
}
case Intrinsic::memset: {
MemSetInst *MSI = cast<MemSetInst>(&I);
// TODO: If the size is known, use it.
- visitMemoryReference(I, MSI->getDest(), AliasAnalysis::UnknownSize,
- MSI->getAlignment(), nullptr,
- MemRef::Write);
+ visitMemoryReference(I, MSI->getDest(), MemoryLocation::UnknownSize,
+ MSI->getAlignment(), nullptr, MemRef::Write);
break;
}
@@ -328,26 +323,26 @@ void Lint::visitCallSite(CallSite CS) {
"Undefined behavior: va_start called in a non-varargs function",
&I);
- visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
+ nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::vacopy:
- visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Write);
- visitMemoryReference(I, CS.getArgument(1), AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Read);
+ visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
+ nullptr, MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(1), MemoryLocation::UnknownSize, 0,
+ nullptr, MemRef::Read);
break;
case Intrinsic::vaend:
- visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
+ nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::stackrestore:
// Stackrestore doesn't read or write memory, but it sets the
// stack pointer, which the compiler may read from or write to
// at any time, so check it for both readability and writeability.
- visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, nullptr, MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
+ nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::eh_begincatch:
@@ -435,7 +430,7 @@ void Lint::visitMemoryReference(Instruction &I,
// OK, so the access is to a constant offset from Ptr. Check that Ptr is
// something we can handle and if so extract the size of this base object
// along with its alignment.
- uint64_t BaseSize = AliasAnalysis::UnknownSize;
+ uint64_t BaseSize = MemoryLocation::UnknownSize;
unsigned BaseAlign = 0;
if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
@@ -460,8 +455,8 @@ void Lint::visitMemoryReference(Instruction &I,
// Accesses from before the start or after the end of the object are not
// defined.
- Assert(Size == AliasAnalysis::UnknownSize ||
- BaseSize == AliasAnalysis::UnknownSize ||
+ Assert(Size == MemoryLocation::UnknownSize ||
+ BaseSize == MemoryLocation::UnknownSize ||
(Offset >= 0 && Offset + Size <= BaseSize),
"Undefined behavior: Buffer overflow", &I);
@@ -770,12 +765,12 @@ void Lint::visitAllocaInst(AllocaInst &I) {
}
void Lint::visitVAArgInst(VAArgInst &I) {
- visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0,
+ visitMemoryReference(I, I.getOperand(0), MemoryLocation::UnknownSize, 0,
nullptr, MemRef::Read | MemRef::Write);
}
void Lint::visitIndirectBrInst(IndirectBrInst &I) {
- visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0,
+ visitMemoryReference(I, I.getAddress(), MemoryLocation::UnknownSize, 0,
nullptr, MemRef::Branchee);
Assert(I.getNumDestinations() != 0,
diff --git a/lib/Analysis/LoopAccessAnalysis.cpp b/lib/Analysis/LoopAccessAnalysis.cpp
index c661c7b87dcb..8425b75f3ff9 100644
--- a/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/lib/Analysis/LoopAccessAnalysis.cpp
@@ -210,18 +210,18 @@ public:
: DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckNeeded(false) {}
/// \brief Register a load and whether it is only read from.
- void addLoad(AliasAnalysis::Location &Loc, bool IsReadOnly) {
+ void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
Value *Ptr = const_cast<Value*>(Loc.Ptr);
- AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.AATags);
+ AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
Accesses.insert(MemAccessInfo(Ptr, false));
if (IsReadOnly)
ReadOnlyPtr.insert(Ptr);
}
/// \brief Register a store.
- void addStore(AliasAnalysis::Location &Loc) {
+ void addStore(MemoryLocation &Loc) {
Value *Ptr = const_cast<Value*>(Loc.Ptr);
- AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.AATags);
+ AST.add(Ptr, MemoryLocation::UnknownSize, Loc.AATags);
Accesses.insert(MemAccessInfo(Ptr, true));
}
@@ -1150,7 +1150,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
if (Seen.insert(Ptr).second) {
++NumReadWrites;
- AliasAnalysis::Location Loc = MemoryLocation::get(ST);
+ MemoryLocation Loc = MemoryLocation::get(ST);
// The TBAA metadata could have a control dependency on the predication
// condition, so we cannot rely on it when determining whether or not we
// need runtime pointer checks.
@@ -1186,7 +1186,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
IsReadOnlyPtr = true;
}
- AliasAnalysis::Location Loc = MemoryLocation::get(LD);
+ MemoryLocation Loc = MemoryLocation::get(LD);
// The TBAA metadata could have a control dependency on the predication
// condition, so we cannot rely on it when determining whether or not we
// need runtime pointer checks.
diff --git a/lib/Analysis/LoopPass.cpp b/lib/Analysis/LoopPass.cpp
index e9fcf02118b9..81b7ecd480bf 100644
--- a/lib/Analysis/LoopPass.cpp
+++ b/lib/Analysis/LoopPass.cpp
@@ -56,7 +56,7 @@ public:
};
char PrintLoopPass::ID = 0;
-}
+} // namespace
//===----------------------------------------------------------------------===//
// LPPassManager
diff --git a/lib/Analysis/MemDepPrinter.cpp b/lib/Analysis/MemDepPrinter.cpp
index da3b829b6d31..54a04d9856b7 100644
--- a/lib/Analysis/MemDepPrinter.cpp
+++ b/lib/Analysis/MemDepPrinter.cpp
@@ -74,7 +74,7 @@ namespace {
return InstTypePair(inst, type);
}
};
-}
+} // namespace
char MemDepPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(MemDepPrinter, "print-memdeps",
diff --git a/lib/Analysis/MemDerefPrinter.cpp b/lib/Analysis/MemDerefPrinter.cpp
index fa292a28ec87..b0194d33d0e8 100644
--- a/lib/Analysis/MemDerefPrinter.cpp
+++ b/lib/Analysis/MemDerefPrinter.cpp
@@ -37,7 +37,7 @@ namespace {
Vec.clear();
}
};
-}
+} // namespace
char MemDerefPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(MemDerefPrinter, "print-memderefs",
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index 255bae61eb2f..cf8ba5ccb725 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -118,10 +118,8 @@ static void RemoveFromReverseMap(DenseMap<Instruction*,
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
-static
-AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
- AliasAnalysis::Location &Loc,
- AliasAnalysis *AA) {
+static AliasAnalysis::ModRefResult
+GetLocation(const Instruction *Inst, MemoryLocation &Loc, AliasAnalysis *AA) {
if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
if (LI->isUnordered()) {
Loc = MemoryLocation::get(LI);
@@ -131,7 +129,7 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
Loc = MemoryLocation::get(LI);
return AliasAnalysis::ModRef;
}
- Loc = AliasAnalysis::Location();
+ Loc = MemoryLocation();
return AliasAnalysis::ModRef;
}
@@ -144,7 +142,7 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
Loc = MemoryLocation::get(SI);
return AliasAnalysis::ModRef;
}
- Loc = AliasAnalysis::Location();
+ Loc = MemoryLocation();
return AliasAnalysis::ModRef;
}
@@ -155,7 +153,7 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
// calls to free() deallocate the entire structure
- Loc = AliasAnalysis::Location(CI->getArgOperand(0));
+ Loc = MemoryLocation(CI->getArgOperand(0));
return AliasAnalysis::Mod;
}
@@ -167,17 +165,17 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
II->getAAMetadata(AAInfo);
- Loc = AliasAnalysis::Location(II->getArgOperand(1),
- cast<ConstantInt>(II->getArgOperand(0))
- ->getZExtValue(), AAInfo);
+ Loc = MemoryLocation(
+ II->getArgOperand(1),
+ cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
return AliasAnalysis::Mod;
case Intrinsic::invariant_end:
II->getAAMetadata(AAInfo);
- Loc = AliasAnalysis::Location(II->getArgOperand(2),
- cast<ConstantInt>(II->getArgOperand(1))
- ->getZExtValue(), AAInfo);
+ Loc = MemoryLocation(
+ II->getArgOperand(2),
+ cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
return AliasAnalysis::Mod;
@@ -212,7 +210,7 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
Instruction *Inst = --ScanIt;
// If this inst is a memory op, get the pointer it accessed
- AliasAnalysis::Location Loc;
+ MemoryLocation Loc;
AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
if (Loc.Ptr) {
// A simple instruction.
@@ -259,9 +257,10 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
///
/// MemLocBase, MemLocOffset are lazily computed here the first time the
/// base/offs of memloc is needed.
-static bool isLoadLoadClobberIfExtendedToFullWidth(
- const AliasAnalysis::Location &MemLoc, const Value *&MemLocBase,
- int64_t &MemLocOffs, const LoadInst *LI) {
+static bool isLoadLoadClobberIfExtendedToFullWidth(const MemoryLocation &MemLoc,
+ const Value *&MemLocBase,
+ int64_t &MemLocOffs,
+ const LoadInst *LI) {
const DataLayout &DL = LI->getModule()->getDataLayout();
// If we haven't already computed the base/offset of MemLoc, do so now.
@@ -368,10 +367,9 @@ static bool isVolatile(Instruction *Inst) {
/// with reads from read-only locations. If possible, pass the query
/// instruction as well; this function may take advantage of the metadata
/// annotated to the query instruction to refine the result.
-MemDepResult MemoryDependenceAnalysis::
-getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
- BasicBlock::iterator ScanIt, BasicBlock *BB,
- Instruction *QueryInst) {
+MemDepResult MemoryDependenceAnalysis::getPointerDependencyFrom(
+ const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
+ BasicBlock *BB, Instruction *QueryInst) {
const Value *MemLocBase = nullptr;
int64_t MemLocOffset = 0;
@@ -440,8 +438,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point (the right approach is to use
// GetPointerBaseWithConstantOffset).
- if (AA->isMustAlias(AliasAnalysis::Location(II->getArgOperand(1)),
- MemLoc))
+ if (AA->isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
return MemDepResult::getDef(II);
continue;
}
@@ -486,7 +483,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
}
}
- AliasAnalysis::Location LoadLoc = MemoryLocation::get(LI);
+ MemoryLocation LoadLoc = MemoryLocation::get(LI);
// If we found a pointer, check if it could be the same as our pointer.
AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);
@@ -575,7 +572,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// Ok, this store might clobber the query pointer. Check to see if it is
// a must alias: in this case, we want to return this as a def.
- AliasAnalysis::Location StoreLoc = MemoryLocation::get(SI);
+ MemoryLocation StoreLoc = MemoryLocation::get(SI);
// If we found a pointer, check if it could be the same as our pointer.
AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);
@@ -679,7 +676,7 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
else
LocalCache = MemDepResult::getNonFuncLocal();
} else {
- AliasAnalysis::Location MemLoc;
+ MemoryLocation MemLoc;
AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
if (MemLoc.Ptr) {
// If we can do a pointer scan, make it happen.
@@ -872,7 +869,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(Instruction *QueryInst,
SmallVectorImpl<NonLocalDepResult> &Result) {
- const AliasAnalysis::Location Loc = MemoryLocation::get(QueryInst);
+ const MemoryLocation Loc = MemoryLocation::get(QueryInst);
bool isLoad = isa<LoadInst>(QueryInst);
BasicBlock *FromBB = QueryInst->getParent();
assert(FromBB);
@@ -924,11 +921,9 @@ getNonLocalPointerDependency(Instruction *QueryInst,
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available). If we do a lookup,
/// add the result to the cache.
-MemDepResult MemoryDependenceAnalysis::
-GetNonLocalInfoForBlock(Instruction *QueryInst,
- const AliasAnalysis::Location &Loc,
- bool isLoad, BasicBlock *BB,
- NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
+MemDepResult MemoryDependenceAnalysis::GetNonLocalInfoForBlock(
+ Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
+ BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
// Do a binary search to see if we already have an entry for this block in
// the cache set. If so, find it.
@@ -1040,14 +1035,11 @@ SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
/// This function returns false on success, or true to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
-bool MemoryDependenceAnalysis::
-getNonLocalPointerDepFromBB(Instruction *QueryInst,
- const PHITransAddr &Pointer,
- const AliasAnalysis::Location &Loc,
- bool isLoad, BasicBlock *StartBB,
- SmallVectorImpl<NonLocalDepResult> &Result,
- DenseMap<BasicBlock*, Value*> &Visited,
- bool SkipFirstBlock) {
+bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
+ Instruction *QueryInst, const PHITransAddr &Pointer,
+ const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
+ SmallVectorImpl<NonLocalDepResult> &Result,
+ DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock) {
// Look up the cached info for Pointer.
ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
diff --git a/lib/Analysis/MemoryLocation.cpp b/lib/Analysis/MemoryLocation.cpp
index f87a017b9211..e4491261e055 100644
--- a/lib/Analysis/MemoryLocation.cpp
+++ b/lib/Analysis/MemoryLocation.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
@@ -88,3 +89,86 @@ MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MTI) {
return MemoryLocation(MTI->getRawDest(), Size, AATags);
}
+
+// FIXME: This code is duplicated with BasicAliasAnalysis and should be hoisted
+// to some common utility location.
+static bool isMemsetPattern16(const Function *MS,
+ const TargetLibraryInfo &TLI) {
+ if (TLI.has(LibFunc::memset_pattern16) &&
+ MS->getName() == "memset_pattern16") {
+ FunctionType *MemsetType = MS->getFunctionType();
+ if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
+ isa<PointerType>(MemsetType->getParamType(0)) &&
+ isa<PointerType>(MemsetType->getParamType(1)) &&
+ isa<IntegerType>(MemsetType->getParamType(2)))
+ return true;
+ }
+
+ return false;
+}
+
+MemoryLocation MemoryLocation::getForArgument(ImmutableCallSite CS,
+ unsigned ArgIdx,
+ const TargetLibraryInfo &TLI) {
+ AAMDNodes AATags;
+ CS->getAAMetadata(AATags);
+ const Value *Arg = CS.getArgument(ArgIdx);
+
+ // We may be able to produce an exact size for known intrinsics.
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
+ const DataLayout &DL = II->getModule()->getDataLayout();
+
+ switch (II->getIntrinsicID()) {
+ default:
+ break;
+ case Intrinsic::memset:
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ assert((ArgIdx == 0 || ArgIdx == 1) &&
+ "Invalid argument index for memory intrinsic");
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
+ return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
+ break;
+
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start:
+ assert(ArgIdx == 1 && "Invalid argument index");
+ return MemoryLocation(
+ Arg, cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AATags);
+
+ case Intrinsic::invariant_end:
+ assert(ArgIdx == 2 && "Invalid argument index");
+ return MemoryLocation(
+ Arg, cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AATags);
+
+ case Intrinsic::arm_neon_vld1:
+ assert(ArgIdx == 0 && "Invalid argument index");
+ // LLVM's vld1 and vst1 intrinsics currently only support a single
+ // vector register.
+ return MemoryLocation(Arg, DL.getTypeStoreSize(II->getType()), AATags);
+
+ case Intrinsic::arm_neon_vst1:
+ assert(ArgIdx == 0 && "Invalid argument index");
+ return MemoryLocation(
+ Arg, DL.getTypeStoreSize(II->getArgOperand(1)->getType()), AATags);
+ }
+ }
+
+ // We can bound the aliasing properties of memset_pattern16 just as we can
+ // for memcpy/memset. This is particularly important because the
+ // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
+ // whenever possible.
+ if (CS.getCalledFunction() &&
+ isMemsetPattern16(CS.getCalledFunction(), TLI)) {
+ assert((ArgIdx == 0 || ArgIdx == 1) &&
+ "Invalid argument index for memset_pattern16");
+ if (ArgIdx == 1)
+ return MemoryLocation(Arg, 16, AATags);
+ if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
+ return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
+ }
+ // FIXME: Handle memset_pattern4 and memset_pattern8 also.
+
+ return MemoryLocation(CS.getArgument(ArgIdx), UnknownSize, AATags);
+}
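
A small sketch of what getForArgument returns for a sized intrinsic versus the memset_pattern16 special case; the IR and sizes are illustrative:

  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 32, i32 4, i1 false)
  ; getForArgument(CS, 0, TLI) -> MemoryLocation(%d, 32)
  ; getForArgument(CS, 1, TLI) -> MemoryLocation(%s, 32)

  call void @memset_pattern16(i8* %d, i8* %pat, i64 %n)
  ; getForArgument(CS, 1, TLI) -> MemoryLocation(%pat, 16); the pattern
  ;   argument is always exactly 16 bytes
  ; getForArgument(CS, 0, TLI) -> a precise size only if %n is a ConstantInt,
  ;   otherwise UnknownSize
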
diff --git a/lib/Analysis/ModuleDebugInfoPrinter.cpp b/lib/Analysis/ModuleDebugInfoPrinter.cpp
index 36c47141a45f..45ae818c35bf 100644
--- a/lib/Analysis/ModuleDebugInfoPrinter.cpp
+++ b/lib/Analysis/ModuleDebugInfoPrinter.cpp
@@ -40,7 +40,7 @@ namespace {
}
void print(raw_ostream &O, const Module *M) const override;
};
-}
+} // namespace
char ModuleDebugInfoPrinter::ID = 0;
INITIALIZE_PASS(ModuleDebugInfoPrinter, "module-debuginfo",
diff --git a/lib/Analysis/NoAliasAnalysis.cpp b/lib/Analysis/NoAliasAnalysis.cpp
index 203e1daf7a09..7617622b9ab6 100644
--- a/lib/Analysis/NoAliasAnalysis.cpp
+++ b/lib/Analysis/NoAliasAnalysis.cpp
@@ -41,7 +41,8 @@ namespace {
return true;
}
- AliasResult alias(const Location &LocA, const Location &LocB) override {
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override {
return MayAlias;
}
@@ -52,19 +53,17 @@ namespace {
return UnknownModRefBehavior;
}
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override {
return false;
}
- Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- ModRefResult &Mask) override {
- Mask = ModRef;
- AAMDNodes AATags;
- CS->getAAMetadata(AATags);
- return Location(CS.getArgument(ArgIdx), UnknownSize, AATags);
+ ModRefResult getArgModRefInfo(ImmutableCallSite CS,
+ unsigned ArgIdx) override {
+ return ModRef;
}
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override {
+ const MemoryLocation &Loc) override {
return ModRef;
}
ModRefResult getModRefInfo(ImmutableCallSite CS1,
diff --git a/lib/Analysis/PHITransAddr.cpp b/lib/Analysis/PHITransAddr.cpp
index 633d6aaad35e..8d80c6028ba3 100644
--- a/lib/Analysis/PHITransAddr.cpp
+++ b/lib/Analysis/PHITransAddr.cpp
@@ -244,13 +244,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
GEPI->getNumOperands() == GEPOps.size() &&
GEPI->getParent()->getParent() == CurBB->getParent() &&
(!DT || DT->dominates(GEPI->getParent(), PredBB))) {
- bool Mismatch = false;
- for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
- if (GEPI->getOperand(i) != GEPOps[i]) {
- Mismatch = true;
- break;
- }
- if (!Mismatch)
+ if (std::equal(GEPOps.begin(), GEPOps.end(), GEPI->op_begin()))
return GEPI;
}
}
@@ -392,10 +386,10 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
if (!OpVal) return nullptr;
// Otherwise insert a cast at the end of PredBB.
- CastInst *New = CastInst::Create(Cast->getOpcode(),
- OpVal, InVal->getType(),
- InVal->getName()+".phi.trans.insert",
+ CastInst *New = CastInst::Create(Cast->getOpcode(), OpVal, InVal->getType(),
+ InVal->getName() + ".phi.trans.insert",
PredBB->getTerminator());
+ New->setDebugLoc(Inst->getDebugLoc());
NewInsts.push_back(New);
return New;
}
@@ -414,6 +408,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
GetElementPtrInst *Result = GetElementPtrInst::Create(
GEP->getSourceElementType(), GEPOps[0], makeArrayRef(GEPOps).slice(1),
InVal->getName() + ".phi.trans.insert", PredBB->getTerminator());
+ Result->setDebugLoc(Inst->getDebugLoc());
Result->setIsInBounds(GEP->isInBounds());
NewInsts.push_back(Result);
return Result;
diff --git a/lib/Analysis/RegionPrinter.cpp b/lib/Analysis/RegionPrinter.cpp
index d7f510984881..2b09becaac38 100644
--- a/lib/Analysis/RegionPrinter.cpp
+++ b/lib/Analysis/RegionPrinter.cpp
@@ -194,7 +194,7 @@ struct RegionOnlyPrinter
}
};
-}
+} // namespace
char RegionOnlyPrinter::ID = 0;
INITIALIZE_PASS(RegionOnlyPrinter, "dot-regions-only",
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 0e9f812c05e2..81e07e99dca1 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -627,7 +627,7 @@ namespace {
llvm_unreachable("Unknown SCEV kind!");
}
};
-}
+} // namespace
/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
@@ -689,7 +689,7 @@ struct FindSCEVSize {
return false;
}
};
-}
+} // namespace
// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
@@ -937,7 +937,7 @@ private:
const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};
-}
+} // namespace
//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
@@ -1248,7 +1248,7 @@ struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;
-}
+} // namespace
// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
@@ -3300,7 +3300,7 @@ namespace {
}
bool isDone() const { return FindOne; }
};
-}
+} // namespace
bool ScalarEvolution::checkValidity(const SCEV *S) const {
FindInvalidSCEVUnknown F;
@@ -7594,7 +7594,7 @@ struct FindUndefs {
return Found;
}
};
-}
+} // namespace
// Return true when S contains at least an undef value.
static inline bool
@@ -7644,7 +7644,7 @@ struct SCEVCollectTerms {
}
bool isDone() const { return false; }
};
-}
+} // namespace
/// Find parametric terms in this SCEVAddRecExpr.
void SCEVAddRecExpr::collectParametricTerms(
@@ -7737,7 +7737,7 @@ struct FindParameter {
return FoundParameter;
}
};
-}
+} // namespace
// Returns true when S contains at least a SCEVUnknown parameter.
static inline bool
@@ -8418,7 +8418,7 @@ struct SCEVSearch {
}
bool isDone() const { return IsFound; }
};
-}
+} // namespace
bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
SCEVSearch Search(Op);
diff --git a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index ccec0a877f5a..2d45c59a500c 100644
--- a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -53,7 +53,8 @@ namespace {
private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
- AliasResult alias(const Location &LocA, const Location &LocB) override;
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override;
Value *GetBaseValue(const SCEV *S);
};
@@ -107,8 +108,8 @@ ScalarEvolutionAliasAnalysis::GetBaseValue(const SCEV *S) {
}
AliasAnalysis::AliasResult
-ScalarEvolutionAliasAnalysis::alias(const Location &LocA,
- const Location &LocB) {
+ScalarEvolutionAliasAnalysis::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
// If either of the memory references is empty, it doesn't matter what the
// pointer values are. This allows the code below to ignore this special
// case.
@@ -161,12 +162,12 @@ ScalarEvolutionAliasAnalysis::alias(const Location &LocA,
Value *AO = GetBaseValue(AS);
Value *BO = GetBaseValue(BS);
if ((AO && AO != LocA.Ptr) || (BO && BO != LocB.Ptr))
- if (alias(Location(AO ? AO : LocA.Ptr,
- AO ? +UnknownSize : LocA.Size,
- AO ? AAMDNodes() : LocA.AATags),
- Location(BO ? BO : LocB.Ptr,
- BO ? +UnknownSize : LocB.Size,
- BO ? AAMDNodes() : LocB.AATags)) == NoAlias)
+ if (alias(MemoryLocation(AO ? AO : LocA.Ptr,
+ AO ? +MemoryLocation::UnknownSize : LocA.Size,
+ AO ? AAMDNodes() : LocA.AATags),
+ MemoryLocation(BO ? BO : LocB.Ptr,
+ BO ? +MemoryLocation::UnknownSize : LocB.Size,
+ BO ? AAMDNodes() : LocB.AATags)) == NoAlias)
return NoAlias;
// Forward the query to the next analysis.
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index f82235d0c26e..0264ad143f49 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -661,7 +661,7 @@ public:
}
};
-}
+} // namespace
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
Type *Ty = SE.getEffectiveSCEVType(S->getType());
@@ -1702,7 +1702,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
unsigned NumElim = 0;
DenseMap<const SCEV *, PHINode *> ExprToIVMap;
- // Process phis from wide to narrow. Mapping wide phis to the their truncation
+ // Process phis from wide to narrow. Map wide phis to their truncation
// so narrow phis can reuse them.
for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
PEnd = Phis.end(); PIter != PEnd; ++PIter) {
@@ -1933,7 +1933,7 @@ struct SCEVFindUnsafe {
}
bool isDone() const { return IsUnsafe; }
};
-}
+} // namespace
namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
diff --git a/lib/Analysis/ScopedNoAliasAA.cpp b/lib/Analysis/ScopedNoAliasAA.cpp
index 02f8b0b1384f..a8cfeb67ef94 100644
--- a/lib/Analysis/ScopedNoAliasAA.cpp
+++ b/lib/Analysis/ScopedNoAliasAA.cpp
@@ -99,12 +99,13 @@ protected:
private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
- AliasResult alias(const Location &LocA, const Location &LocB) override;
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override;
+ bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) override;
ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
ModRefBehavior getModRefBehavior(const Function *F) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override;
+ const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override;
};
@@ -176,8 +177,8 @@ ScopedNoAliasAA::mayAliasInScopes(const MDNode *Scopes,
return true;
}
-AliasAnalysis::AliasResult
-ScopedNoAliasAA::alias(const Location &LocA, const Location &LocB) {
+AliasAnalysis::AliasResult ScopedNoAliasAA::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
if (!EnableScopedNoAlias)
return AliasAnalysis::alias(LocA, LocB);
@@ -198,7 +199,7 @@ ScopedNoAliasAA::alias(const Location &LocA, const Location &LocB) {
return AliasAnalysis::alias(LocA, LocB);
}
-bool ScopedNoAliasAA::pointsToConstantMemory(const Location &Loc,
+bool ScopedNoAliasAA::pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) {
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
}
@@ -214,7 +215,8 @@ ScopedNoAliasAA::getModRefBehavior(const Function *F) {
}
AliasAnalysis::ModRefResult
-ScopedNoAliasAA::getModRefInfo(ImmutableCallSite CS, const Location &Loc) {
+ScopedNoAliasAA::getModRefInfo(ImmutableCallSite CS,
+ const MemoryLocation &Loc) {
if (!EnableScopedNoAlias)
return AliasAnalysis::getModRefInfo(CS, Loc);
diff --git a/lib/Analysis/StratifiedSets.h b/lib/Analysis/StratifiedSets.h
index fd3fbc0d86ad..878ca3d4c70b 100644
--- a/lib/Analysis/StratifiedSets.h
+++ b/lib/Analysis/StratifiedSets.h
@@ -688,5 +688,5 @@ private:
bool inbounds(StratifiedIndex N) const { return N < Links.size(); }
};
-}
+} // namespace llvm
#endif // LLVM_ADT_STRATIFIEDSETS_H
diff --git a/lib/Analysis/TypeBasedAliasAnalysis.cpp b/lib/Analysis/TypeBasedAliasAnalysis.cpp
index 115872584cb2..82d29e0dc3fb 100644
--- a/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -270,7 +270,7 @@ namespace {
return TBAAStructTypeNode(P);
}
};
-}
+} // namespace
namespace {
/// TypeBasedAliasAnalysis - This is a simple alias analysis
@@ -300,12 +300,14 @@ namespace {
private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
- AliasResult alias(const Location &LocA, const Location &LocB) override;
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override;
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override;
ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
ModRefBehavior getModRefBehavior(const Function *F) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override;
+ const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override;
};
@@ -453,8 +455,8 @@ TypeBasedAliasAnalysis::PathAliases(const MDNode *A,
}
AliasAnalysis::AliasResult
-TypeBasedAliasAnalysis::alias(const Location &LocA,
- const Location &LocB) {
+TypeBasedAliasAnalysis::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
if (!EnableTBAA)
return AliasAnalysis::alias(LocA, LocB);
@@ -473,7 +475,7 @@ TypeBasedAliasAnalysis::alias(const Location &LocA,
return NoAlias;
}
-bool TypeBasedAliasAnalysis::pointsToConstantMemory(const Location &Loc,
+bool TypeBasedAliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) {
if (!EnableTBAA)
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
@@ -515,7 +517,7 @@ TypeBasedAliasAnalysis::getModRefBehavior(const Function *F) {
AliasAnalysis::ModRefResult
TypeBasedAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+ const MemoryLocation &Loc) {
if (!EnableTBAA)
return AliasAnalysis::getModRefInfo(CS, Loc);
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index c4f046340fce..c45005f343d3 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -551,12 +551,17 @@ static void computeKnownBitsFromTrueCondition(Value *V, ICmpInst *Cmp,
}
break;
case ICmpInst::ICMP_EQ:
- if (LHS == V)
- computeKnownBits(RHS, KnownZero, KnownOne, DL, Depth + 1, Q);
- else if (RHS == V)
- computeKnownBits(LHS, KnownZero, KnownOne, DL, Depth + 1, Q);
- else
- llvm_unreachable("missing use?");
+ {
+ APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
+ if (LHS == V)
+ computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
+ else if (RHS == V)
+ computeKnownBits(LHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
+ else
+ llvm_unreachable("missing use?");
+ KnownZero |= KnownZeroTemp;
+ KnownOne |= KnownOneTemp;
+ }
break;
case ICmpInst::ICMP_ULE:
if (LHS == V) {
@@ -936,147 +941,11 @@ static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
}
}
-/// Determine which bits of V are known to be either zero or one and return
-/// them in the KnownZero/KnownOne bit sets.
-///
-/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
-/// we cannot optimize based on the assumption that it is zero without changing
-/// it to be an explicit zero. If we don't change it to zero, other code could
-/// optimized based on the contradictory assumption that it is non-zero.
-/// Because instcombine aggressively folds operations with undef args anyway,
-/// this won't lose us code quality.
-///
-/// This function is defined on values with integer type, values with pointer
-/// type, and vectors of integers. In the case
-/// where V is a vector, known zero, and known one values are the
-/// same width as the vector element, and the bit is set only if it is true
-/// for all of the elements in the vector.
-void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
- const DataLayout &DL, unsigned Depth, const Query &Q) {
- assert(V && "No Value?");
- assert(Depth <= MaxDepth && "Limit Search Depth");
+static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
+ APInt &KnownOne, const DataLayout &DL,
+ unsigned Depth, const Query &Q) {
unsigned BitWidth = KnownZero.getBitWidth();
- assert((V->getType()->isIntOrIntVectorTy() ||
- V->getType()->getScalarType()->isPointerTy()) &&
- "Not integer or pointer type!");
- assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
- (!V->getType()->isIntOrIntVectorTy() ||
- V->getType()->getScalarSizeInBits() == BitWidth) &&
- KnownZero.getBitWidth() == BitWidth &&
- KnownOne.getBitWidth() == BitWidth &&
- "V, KnownOne and KnownZero should have same BitWidth");
-
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- // We know all of the bits for a constant!
- KnownOne = CI->getValue();
- KnownZero = ~KnownOne;
- return;
- }
- // Null and aggregate-zero are all-zeros.
- if (isa<ConstantPointerNull>(V) ||
- isa<ConstantAggregateZero>(V)) {
- KnownOne.clearAllBits();
- KnownZero = APInt::getAllOnesValue(BitWidth);
- return;
- }
- // Handle a constant vector by taking the intersection of the known bits of
- // each element. There is no real need to handle ConstantVector here, because
- // we don't handle undef in any particularly useful way.
- if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
- // We know that CDS must be a vector of integers. Take the intersection of
- // each element.
- KnownZero.setAllBits(); KnownOne.setAllBits();
- APInt Elt(KnownZero.getBitWidth(), 0);
- for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
- Elt = CDS->getElementAsInteger(i);
- KnownZero &= ~Elt;
- KnownOne &= Elt;
- }
- return;
- }
-
- // The address of an aligned GlobalValue has trailing zeros.
- if (auto *GO = dyn_cast<GlobalObject>(V)) {
- unsigned Align = GO->getAlignment();
- if (Align == 0) {
- if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
- Type *ObjectType = GVar->getType()->getElementType();
- if (ObjectType->isSized()) {
- // If the object is defined in the current Module, we'll be giving
- // it the preferred alignment. Otherwise, we have to assume that it
- // may only have the minimum ABI alignment.
- if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
- Align = DL.getPreferredAlignment(GVar);
- else
- Align = DL.getABITypeAlignment(ObjectType);
- }
- }
- }
- if (Align > 0)
- KnownZero = APInt::getLowBitsSet(BitWidth,
- countTrailingZeros(Align));
- else
- KnownZero.clearAllBits();
- KnownOne.clearAllBits();
- return;
- }
-
- if (Argument *A = dyn_cast<Argument>(V)) {
- unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
-
- if (!Align && A->hasStructRetAttr()) {
- // An sret parameter has at least the ABI alignment of the return type.
- Type *EltTy = cast<PointerType>(A->getType())->getElementType();
- if (EltTy->isSized())
- Align = DL.getABITypeAlignment(EltTy);
- }
-
- if (Align)
- KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
- else
- KnownZero.clearAllBits();
- KnownOne.clearAllBits();
-
- // Don't give up yet... there might be an assumption that provides more
- // information...
- computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
-
- // Or a dominating condition for that matter
- if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
- computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL,
- Depth, Q);
- return;
- }
-
- // Start out not knowing anything.
- KnownZero.clearAllBits(); KnownOne.clearAllBits();
-
- // Limit search depth.
- // All recursive calls that increase depth must come after this.
- if (Depth == MaxDepth)
- return;
-
- // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
- // the bits of its aliasee.
- if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
- if (!GA->mayBeOverridden())
- computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, DL, Depth + 1, Q);
- return;
- }
-
- // Check whether a nearby assume intrinsic can determine some known bits.
- computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
-
- // Check whether there's a dominating condition which implies something about
- // this value at the given context.
- if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
- computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, Depth,
- Q);
-
- Operator *I = dyn_cast<Operator>(V);
- if (!I) return;
-
APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
switch (I->getOpcode()) {
default: break;
@@ -1328,7 +1197,7 @@ void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
}
case Instruction::Alloca: {
- AllocaInst *AI = cast<AllocaInst>(V);
+ AllocaInst *AI = cast<AllocaInst>(I);
unsigned Align = AI->getAlignment();
if (Align == 0)
Align = DL.getABITypeAlignment(AI->getType()->getElementType());
@@ -1523,6 +1392,151 @@ void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
}
}
}
+}
+
+/// Determine which bits of V are known to be either zero or one and return
+/// them in the KnownZero/KnownOne bit sets.
+///
+/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
+/// we cannot optimize based on the assumption that it is zero without changing
+/// it to be an explicit zero. If we don't change it to zero, other code could
+/// be optimized based on the contradictory assumption that it is non-zero.
+/// Because instcombine aggressively folds operations with undef args anyway,
+/// this won't lose us code quality.
+///
+/// This function is defined on values with integer type, values with pointer
+/// type, and vectors of integers. In the case where V is a vector, the known
+/// zero and known one values are the same width as the vector element, and
+/// the bit is set only if it is true for all of the elements in the vector.
+void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+ const DataLayout &DL, unsigned Depth, const Query &Q) {
+ assert(V && "No Value?");
+ assert(Depth <= MaxDepth && "Limit Search Depth");
+ unsigned BitWidth = KnownZero.getBitWidth();
+
+ assert((V->getType()->isIntOrIntVectorTy() ||
+ V->getType()->getScalarType()->isPointerTy()) &&
+ "Not integer or pointer type!");
+ assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
+ (!V->getType()->isIntOrIntVectorTy() ||
+ V->getType()->getScalarSizeInBits() == BitWidth) &&
+ KnownZero.getBitWidth() == BitWidth &&
+ KnownOne.getBitWidth() == BitWidth &&
+ "V, KnownOne and KnownZero should have same BitWidth");
+
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+ // We know all of the bits for a constant!
+ KnownOne = CI->getValue();
+ KnownZero = ~KnownOne;
+ return;
+ }
+ // Null and aggregate-zero are all-zeros.
+ if (isa<ConstantPointerNull>(V) ||
+ isa<ConstantAggregateZero>(V)) {
+ KnownOne.clearAllBits();
+ KnownZero = APInt::getAllOnesValue(BitWidth);
+ return;
+ }
+ // Handle a constant vector by taking the intersection of the known bits of
+ // each element. There is no real need to handle ConstantVector here, because
+ // we don't handle undef in any particularly useful way.
+ if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
+ // We know that CDS must be a vector of integers. Take the intersection of
+ // each element.
+ KnownZero.setAllBits(); KnownOne.setAllBits();
+ APInt Elt(KnownZero.getBitWidth(), 0);
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ Elt = CDS->getElementAsInteger(i);
+ KnownZero &= ~Elt;
+ KnownOne &= Elt;
+ }
+ return;
+ }
+
+ // The address of an aligned GlobalValue has trailing zeros.
+ if (auto *GO = dyn_cast<GlobalObject>(V)) {
+ unsigned Align = GO->getAlignment();
+ if (Align == 0) {
+ if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
+ Type *ObjectType = GVar->getType()->getElementType();
+ if (ObjectType->isSized()) {
+ // If the object is defined in the current Module, we'll be giving
+ // it the preferred alignment. Otherwise, we have to assume that it
+ // may only have the minimum ABI alignment.
+ if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
+ Align = DL.getPreferredAlignment(GVar);
+ else
+ Align = DL.getABITypeAlignment(ObjectType);
+ }
+ }
+ }
+ if (Align > 0)
+ KnownZero = APInt::getLowBitsSet(BitWidth,
+ countTrailingZeros(Align));
+ else
+ KnownZero.clearAllBits();
+ KnownOne.clearAllBits();
+ return;
+ }
+
+ if (Argument *A = dyn_cast<Argument>(V)) {
+ unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
+
+ if (!Align && A->hasStructRetAttr()) {
+ // An sret parameter has at least the ABI alignment of the return type.
+ Type *EltTy = cast<PointerType>(A->getType())->getElementType();
+ if (EltTy->isSized())
+ Align = DL.getABITypeAlignment(EltTy);
+ }
+
+ if (Align)
+ KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
+ else
+ KnownZero.clearAllBits();
+ KnownOne.clearAllBits();
+
+ // Don't give up yet... there might be an assumption that provides more
+ // information...
+ computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
+
+ // Or a dominating condition for that matter
+ if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
+ computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL,
+ Depth, Q);
+ return;
+ }
+
+ // Start out not knowing anything.
+ KnownZero.clearAllBits(); KnownOne.clearAllBits();
+
+ // Limit search depth.
+ // All recursive calls that increase depth must come after this.
+ if (Depth == MaxDepth)
+ return;
+
+ // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
+ // the bits of its aliasee.
+ if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+ if (!GA->mayBeOverridden())
+ computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, DL, Depth + 1, Q);
+ return;
+ }
+
+ if (Operator *I = dyn_cast<Operator>(V))
+ computeKnownBitsFromOperator(I, KnownZero, KnownOne, DL, Depth, Q);
+ // computeKnownBitsFromAssume and computeKnownBitsFromDominatingCondition
+ // strictly refine KnownZero and KnownOne. Therefore, we run them after
+ // computeKnownBitsFromOperator.
+
+ // Check whether a nearby assume intrinsic can determine some known bits.
+ computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
+
+ // Check whether there's a dominating condition which implies something about
+ // this value at the given context.
+ if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
+ computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, Depth,
+ Q);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
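
A sketch of why the ordering comment above matters: operator analysis and assumptions can each pin down different bits of the same value, and running the assume and dominating-condition passes after computeKnownBitsFromOperator lets the two results intersect. Names and the exact bound are illustrative:

  %x4 = shl i32 %x, 2             ; operator analysis: bits 0-1 known zero
  %c  = icmp ult i32 %x4, 32
  call void @llvm.assume(i1 %c)   ; assume analysis: bits 5-31 known zero
  ; computeKnownBits(%x4) now reports KnownZero covering bits 0-1 and 5-31.
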
diff --git a/lib/AsmParser/CMakeLists.txt b/lib/AsmParser/CMakeLists.txt
index 78668377d13e..0d7272321059 100644
--- a/lib/AsmParser/CMakeLists.txt
+++ b/lib/AsmParser/CMakeLists.txt
@@ -6,4 +6,7 @@ add_llvm_library(LLVMAsmParser
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/Analysis
+
+ DEPENDS
+ intrinsics_gen
)
diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp
index 09fe6c0a0bd8..0bdc3506a30a 100644
--- a/lib/AsmParser/LLLexer.cpp
+++ b/lib/AsmParser/LLLexer.cpp
@@ -628,6 +628,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(ssp);
KEYWORD(sspreq);
KEYWORD(sspstrong);
+ KEYWORD(safestack);
KEYWORD(sanitize_address);
KEYWORD(sanitize_thread);
KEYWORD(sanitize_memory);
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index 681af2a90072..a121e59e1f10 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -670,6 +670,9 @@ bool LLParser::ParseAlias(const std::string &Name, LocTy NameLoc, unsigned L,
GA->setDLLStorageClass((GlobalValue::DLLStorageClassTypes)DLLStorageClass);
GA->setUnnamedAddr(UnnamedAddr);
+ if (Name.empty())
+ NumberedVals.push_back(GA.get());
+
// See if this value already exists in the symbol table. If so, it is either
// a redefinition or a definition of a forward reference.
if (GlobalValue *Val = M->getNamedValue(Name)) {
@@ -958,6 +961,7 @@ bool LLParser::ParseFnAttributeValuePairs(AttrBuilder &B,
case lltok::kw_ssp: B.addAttribute(Attribute::StackProtect); break;
case lltok::kw_sspreq: B.addAttribute(Attribute::StackProtectReq); break;
case lltok::kw_sspstrong: B.addAttribute(Attribute::StackProtectStrong); break;
+ case lltok::kw_safestack: B.addAttribute(Attribute::SafeStack); break;
case lltok::kw_sanitize_address: B.addAttribute(Attribute::SanitizeAddress); break;
case lltok::kw_sanitize_thread: B.addAttribute(Attribute::SanitizeThread); break;
case lltok::kw_sanitize_memory: B.addAttribute(Attribute::SanitizeMemory); break;
@@ -1267,6 +1271,7 @@ bool LLParser::ParseOptionalParamAttrs(AttrBuilder &B) {
case lltok::kw_ssp:
case lltok::kw_sspreq:
case lltok::kw_sspstrong:
+ case lltok::kw_safestack:
case lltok::kw_uwtable:
HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute");
break;
@@ -1343,6 +1348,7 @@ bool LLParser::ParseOptionalReturnAttrs(AttrBuilder &B) {
case lltok::kw_ssp:
case lltok::kw_sspreq:
case lltok::kw_sspstrong:
+ case lltok::kw_safestack:
case lltok::kw_uwtable:
HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute");
break;
@@ -4051,7 +4057,7 @@ bool LLParser::ParseTypeAndBasicBlock(BasicBlock *&BB, LocTy &Loc,
/// FunctionHeader
/// ::= OptionalLinkage OptionalVisibility OptionalCallingConv OptRetAttrs
/// OptUnnamedAddr Type GlobalName '(' ArgList ')' OptFuncAttrs OptSection
-/// OptionalAlign OptGC OptionalPrefix OptionalPrologue
+/// OptionalAlign OptGC OptionalPrefix OptionalPrologue OptPersonalityFn
bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
// Parse the linkage.
LocTy LinkageLoc = Lex.getLoc();
@@ -4133,6 +4139,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
LocTy UnnamedAddrLoc;
Constant *Prefix = nullptr;
Constant *Prologue = nullptr;
+ Constant *PersonalityFn = nullptr;
Comdat *C;
if (ParseArgumentList(ArgList, isVarArg) ||
@@ -4149,7 +4156,9 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
(EatIfPresent(lltok::kw_prefix) &&
ParseGlobalTypeAndValue(Prefix)) ||
(EatIfPresent(lltok::kw_prologue) &&
- ParseGlobalTypeAndValue(Prologue)))
+ ParseGlobalTypeAndValue(Prologue)) ||
+ (EatIfPresent(lltok::kw_personality) &&
+ ParseGlobalTypeAndValue(PersonalityFn)))
return true;
if (FuncAttrs.contains(Attribute::Builtin))
@@ -4248,6 +4257,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
Fn->setAlignment(Alignment);
Fn->setSection(Section);
Fn->setComdat(C);
+ Fn->setPersonalityFn(PersonalityFn);
if (!GC.empty()) Fn->setGC(GC.c_str());
Fn->setPrefixData(Prefix);
Fn->setPrologueData(Prologue);
@@ -5099,14 +5109,11 @@ int LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) {
/// ::= 'filter' TypeAndValue ( ',' TypeAndValue )*
bool LLParser::ParseLandingPad(Instruction *&Inst, PerFunctionState &PFS) {
Type *Ty = nullptr; LocTy TyLoc;
- Value *PersFn; LocTy PersFnLoc;
- if (ParseType(Ty, TyLoc) ||
- ParseToken(lltok::kw_personality, "expected 'personality'") ||
- ParseTypeAndValue(PersFn, PersFnLoc, PFS))
+ if (ParseType(Ty, TyLoc))
return true;
- std::unique_ptr<LandingPadInst> LP(LandingPadInst::Create(Ty, PersFn, 0));
+ std::unique_ptr<LandingPadInst> LP(LandingPadInst::Create(Ty, 0));
LP->setCleanup(EatIfPresent(lltok::kw_cleanup));
while (Lex.getKind() == lltok::kw_catch || Lex.getKind() == lltok::kw_filter){
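
The LLParser hunks above are the textual-IR side of moving the personality from landingpad instructions onto the function. A minimal sketch of the new syntax; the declarations are illustrative:

  declare void @g()
  declare i32 @__gxx_personality_v0(...)

  define void @f() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
  entry:
    invoke void @g() to label %cont unwind label %lpad
  cont:
    ret void
  lpad:                           ; no 'personality' clause here anymore
    %lp = landingpad { i8*, i32 } cleanup
    resume { i8*, i32 } %lp
  }
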
diff --git a/lib/AsmParser/LLParser.h b/lib/AsmParser/LLParser.h
index a43a4b06a946..9f554c023f08 100644
--- a/lib/AsmParser/LLParser.h
+++ b/lib/AsmParser/LLParser.h
@@ -469,6 +469,6 @@ namespace llvm {
bool ParseUseListOrderIndexes(SmallVectorImpl<unsigned> &Indexes);
bool sortUseListOrder(Value *V, ArrayRef<unsigned> Indexes, SMLoc Loc);
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h
index c47f5e1654db..2487d1208133 100644
--- a/lib/AsmParser/LLToken.h
+++ b/lib/AsmParser/LLToken.h
@@ -135,6 +135,7 @@ namespace lltok {
kw_ssp,
kw_sspreq,
kw_sspstrong,
+ kw_safestack,
kw_sret,
kw_sanitize_thread,
kw_sanitize_memory,
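
With the lexer, parser, and token changes above, the new attribute round-trips through textual IR like the other stack-protector keywords; a minimal sketch:

  define void @f() safestack {
    ret void
  }
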
diff --git a/lib/Bitcode/Reader/BitReader.cpp b/lib/Bitcode/Reader/BitReader.cpp
index 868fbf010db3..289c76e85b4b 100644
--- a/lib/Bitcode/Reader/BitReader.cpp
+++ b/lib/Bitcode/Reader/BitReader.cpp
@@ -39,7 +39,7 @@ LLVMBool LLVMParseBitcodeInContext(LLVMContextRef ContextRef,
raw_string_ostream Stream(Message);
DiagnosticPrinterRawOStream DP(Stream);
- ErrorOr<Module *> ModuleOrErr = parseBitcodeFile(
+ ErrorOr<std::unique_ptr<Module>> ModuleOrErr = parseBitcodeFile(
Buf, Ctx, [&](const DiagnosticInfo &DI) { DI.print(DP); });
if (ModuleOrErr.getError()) {
if (OutMessage) {
@@ -50,7 +50,7 @@ LLVMBool LLVMParseBitcodeInContext(LLVMContextRef ContextRef,
return 1;
}
- *OutModule = wrap(ModuleOrErr.get());
+ *OutModule = wrap(ModuleOrErr.get().release());
return 0;
}
@@ -64,7 +64,7 @@ LLVMBool LLVMGetBitcodeModuleInContext(LLVMContextRef ContextRef,
std::string Message;
std::unique_ptr<MemoryBuffer> Owner(unwrap(MemBuf));
- ErrorOr<Module *> ModuleOrErr =
+ ErrorOr<std::unique_ptr<Module>> ModuleOrErr =
getLazyBitcodeModule(std::move(Owner), *unwrap(ContextRef));
Owner.release();
@@ -75,7 +75,7 @@ LLVMBool LLVMGetBitcodeModuleInContext(LLVMContextRef ContextRef,
return 1;
}
- *OutM = wrap(ModuleOrErr.get());
+ *OutM = wrap(ModuleOrErr.get().release());
return 0;
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 056d87beef15..0cadd6c5555b 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -44,9 +44,9 @@ enum {
class BitcodeReaderValueList {
std::vector<WeakVH> ValuePtrs;
- /// ResolveConstants - As we resolve forward-referenced constants, we add
- /// information about them to this vector. This allows us to resolve them in
- /// bulk instead of resolving each reference at a time. See the code in
+ /// As we resolve forward-referenced constants, we add information about them
+ /// to this vector. This allows us to resolve them in bulk instead of
+ /// resolving each reference at a time. See the code in
/// ResolveConstantForwardRefs for more information about this.
///
/// The key of this vector is the placeholder constant, the value is the slot
@@ -86,11 +86,11 @@ public:
Constant *getConstantFwdRef(unsigned Idx, Type *Ty);
Value *getValueFwdRef(unsigned Idx, Type *Ty);
- void AssignValue(Value *V, unsigned Idx);
+ void assignValue(Value *V, unsigned Idx);
- /// ResolveConstantForwardRefs - Once all constants are read, this method bulk
- /// resolves any forward references.
- void ResolveConstantForwardRefs();
+ /// Once all constants are read, this method bulk resolves any forward
+ /// references.
+ void resolveConstantForwardRefs();
};
class BitcodeReaderMDValueList {
@@ -125,20 +125,20 @@ public:
}
Metadata *getValueFwdRef(unsigned Idx);
- void AssignValue(Metadata *MD, unsigned Idx);
+ void assignValue(Metadata *MD, unsigned Idx);
void tryToResolveCycles();
};
class BitcodeReader : public GVMaterializer {
LLVMContext &Context;
DiagnosticHandlerFunction DiagnosticHandler;
- Module *TheModule;
+ Module *TheModule = nullptr;
std::unique_ptr<MemoryBuffer> Buffer;
std::unique_ptr<BitstreamReader> StreamFile;
BitstreamCursor Stream;
- DataStreamer *LazyStreamer;
- uint64_t NextUnreadBit;
- bool SeenValueSymbolTable;
+ bool IsStreamed;
+ uint64_t NextUnreadBit = 0;
+ bool SeenValueSymbolTable = false;
std::vector<Type*> TypeList;
BitcodeReaderValueList ValueList;
@@ -150,19 +150,19 @@ class BitcodeReader : public GVMaterializer {
std::vector<std::pair<GlobalAlias*, unsigned> > AliasInits;
std::vector<std::pair<Function*, unsigned> > FunctionPrefixes;
std::vector<std::pair<Function*, unsigned> > FunctionPrologues;
+ std::vector<std::pair<Function*, unsigned> > FunctionPersonalityFns;
SmallVector<Instruction*, 64> InstsWithTBAATag;
- /// MAttributes - The set of attributes by index. Index zero in the
- /// file is for null, and is thus not represented here. As such all indices
- /// are off by one.
+ /// The set of attributes by index. Index zero in the file is for null, and
+ /// is thus not represented here. As such all indices are off by one.
std::vector<AttributeSet> MAttributes;
/// \brief The set of attribute groups.
std::map<unsigned, AttributeSet> MAttributeGroups;
- /// FunctionBBs - While parsing a function body, this is a list of the basic
- /// blocks for the function.
+ /// While parsing a function body, this is a list of the basic blocks for the
+ /// function.
std::vector<BasicBlock*> FunctionBBs;
// When reading the module header, this list is populated with functions that
@@ -180,11 +180,10 @@ class BitcodeReader : public GVMaterializer {
// Several operations happen after the module header has been read, but
// before function bodies are processed. This keeps track of whether
// we've done this yet.
- bool SeenFirstFunctionBody;
+ bool SeenFirstFunctionBody = false;
- /// DeferredFunctionInfo - When function bodies are initially scanned, this
- /// map contains info about where to find deferred function body in the
- /// stream.
+ /// When function bodies are initially scanned, this map contains info about
+  /// where to find the deferred function body in the stream.
DenseMap<Function*, uint64_t> DeferredFunctionInfo;
/// When Metadata block is initially scanned when parsing the module, we may
@@ -198,41 +197,40 @@ class BitcodeReader : public GVMaterializer {
DenseMap<Function *, std::vector<BasicBlock *>> BasicBlockFwdRefs;
std::deque<Function *> BasicBlockFwdRefQueue;
- /// UseRelativeIDs - Indicates that we are using a new encoding for
- /// instruction operands where most operands in the current
- /// FUNCTION_BLOCK are encoded relative to the instruction number,
- /// for a more compact encoding. Some instruction operands are not
- /// relative to the instruction ID: basic block numbers, and types.
- /// Once the old style function blocks have been phased out, we would
+ /// Indicates that we are using a new encoding for instruction operands where
+ /// most operands in the current FUNCTION_BLOCK are encoded relative to the
+ /// instruction number, for a more compact encoding. Some instruction
+ /// operands are not relative to the instruction ID: basic block numbers, and
+ /// types. Once the old style function blocks have been phased out, we would
/// not need this flag.
- bool UseRelativeIDs;
+ bool UseRelativeIDs = false;
/// True if all functions will be materialized, negating the need to process
/// (e.g.) blockaddress forward references.
- bool WillMaterializeAllForwardRefs;
+ bool WillMaterializeAllForwardRefs = false;
/// Functions that have block addresses taken. This is usually empty.
SmallPtrSet<const Function *, 4> BlockAddressesTaken;
/// True if any Metadata block has been materialized.
- bool IsMetadataMaterialized;
+ bool IsMetadataMaterialized = false;
bool StripDebugInfo = false;
public:
- std::error_code Error(BitcodeError E, const Twine &Message);
- std::error_code Error(BitcodeError E);
- std::error_code Error(const Twine &Message);
+ std::error_code error(BitcodeError E, const Twine &Message);
+ std::error_code error(BitcodeError E);
+ std::error_code error(const Twine &Message);
- explicit BitcodeReader(MemoryBuffer *buffer, LLVMContext &C,
- DiagnosticHandlerFunction DiagnosticHandler);
- explicit BitcodeReader(DataStreamer *streamer, LLVMContext &C,
- DiagnosticHandlerFunction DiagnosticHandler);
- ~BitcodeReader() override { FreeState(); }
+ BitcodeReader(MemoryBuffer *Buffer, LLVMContext &Context,
+ DiagnosticHandlerFunction DiagnosticHandler);
+ BitcodeReader(LLVMContext &Context,
+ DiagnosticHandlerFunction DiagnosticHandler);
+ ~BitcodeReader() override { freeState(); }
std::error_code materializeForwardReferencedFunctions();
- void FreeState();
+ void freeState();
void releaseBuffer();
@@ -242,13 +240,14 @@ public:
std::vector<StructType *> getIdentifiedStructTypes() const override;
void dematerialize(GlobalValue *GV) override;
- /// @brief Main interface to parsing a bitcode buffer.
- /// @returns true if an error occurred.
- std::error_code ParseBitcodeInto(Module *M,
+ /// \brief Main interface to parsing a bitcode buffer.
+  /// \returns a non-zero error code if an error occurred.
+ std::error_code parseBitcodeInto(std::unique_ptr<DataStreamer> Streamer,
+ Module *M,
bool ShouldLazyLoadMetadata = false);
- /// @brief Cheap mechanism to just extract module triple
- /// @returns true if an error occurred.
+  /// \brief Cheap mechanism to just extract the module triple.
+  /// \returns the triple, or an error code if one could not be read.
ErrorOr<std::string> parseTriple();
static uint64_t decodeSignRotatedValue(uint64_t V);
@@ -282,9 +281,9 @@ private:
return AttributeSet();
}
- /// getValueTypePair - Read a value/type pair out of the specified record from
- /// slot 'Slot'. Increment Slot past the number of slots used in the record.
- /// Return true on failure.
+ /// Read a value/type pair out of the specified record from slot 'Slot'.
+ /// Increment Slot past the number of slots used in the record. Return true on
+ /// failure.
bool getValueTypePair(SmallVectorImpl<uint64_t> &Record, unsigned &Slot,
unsigned InstNum, Value *&ResVal) {
if (Slot == Record.size()) return true;
@@ -306,9 +305,9 @@ private:
return ResVal == nullptr;
}
- /// popValue - Read a value out of the specified record from slot 'Slot'.
- /// Increment Slot past the number of slots used by the value in the record.
- /// Return true if there is an error.
+ /// Read a value out of the specified record from slot 'Slot'. Increment Slot
+ /// past the number of slots used by the value in the record. Return true if
+ /// there is an error.
bool popValue(SmallVectorImpl<uint64_t> &Record, unsigned &Slot,
unsigned InstNum, Type *Ty, Value *&ResVal) {
if (getValue(Record, Slot, InstNum, Ty, ResVal))
@@ -318,15 +317,15 @@ private:
return false;
}
- /// getValue -- Like popValue, but does not increment the Slot number.
+ /// Like popValue, but does not increment the Slot number.
bool getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
unsigned InstNum, Type *Ty, Value *&ResVal) {
ResVal = getValue(Record, Slot, InstNum, Ty);
return ResVal == nullptr;
}
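
// Self-contained illustration (toy record, not BitcodeReader itself) of the
// Slot convention these helpers share: each reader consumes entries starting
// at Slot and advances it, so successive fields are pulled off in sequence
// and a true return signals failure.
#include <cassert>
#include <cstdint>
#include <vector>

static bool popU64(const std::vector<uint64_t> &Record, unsigned &Slot,
                   uint64_t &Out) {
  if (Slot == Record.size())
    return true;          // failure, matching the readers above
  Out = Record[Slot++];   // read the field and advance the cursor
  return false;
}

int main() {
  std::vector<uint64_t> Record = {42, 7};
  unsigned Slot = 0;
  uint64_t A = 0, B = 0;
  assert(!popU64(Record, Slot, A) && A == 42);
  assert(!popU64(Record, Slot, B) && B == 7);
  assert(popU64(Record, Slot, A)); // record exhausted -> error
  return 0;
}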
- /// getValue -- Version of getValue that returns ResVal directly,
- /// or 0 if there is an error.
+ /// Version of getValue that returns ResVal directly, or 0 if there is an
+ /// error.
Value *getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
unsigned InstNum, Type *Ty) {
if (Slot == Record.size()) return nullptr;
@@ -337,7 +336,7 @@ private:
return getFnValueByID(ValNo, Ty);
}
- /// getValueSigned -- Like getValue, but decodes signed VBRs.
+ /// Like getValue, but decodes signed VBRs.
Value *getValueSigned(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
unsigned InstNum, Type *Ty) {
if (Slot == Record.size()) return nullptr;
@@ -352,29 +351,29 @@ private:
/// corresponding alignment to use. If alignment is too large, returns
/// a corresponding error code.
std::error_code parseAlignmentValue(uint64_t Exponent, unsigned &Alignment);
- std::error_code ParseAttrKind(uint64_t Code, Attribute::AttrKind *Kind);
- std::error_code ParseModule(bool Resume, bool ShouldLazyLoadMetadata = false);
- std::error_code ParseAttributeBlock();
- std::error_code ParseAttributeGroupBlock();
- std::error_code ParseTypeTable();
- std::error_code ParseTypeTableBody();
-
- std::error_code ParseValueSymbolTable();
- std::error_code ParseConstants();
- std::error_code RememberAndSkipFunctionBody();
+ std::error_code parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind);
+ std::error_code parseModule(bool Resume, bool ShouldLazyLoadMetadata = false);
+ std::error_code parseAttributeBlock();
+ std::error_code parseAttributeGroupBlock();
+ std::error_code parseTypeTable();
+ std::error_code parseTypeTableBody();
+
+ std::error_code parseValueSymbolTable();
+ std::error_code parseConstants();
+ std::error_code rememberAndSkipFunctionBody();
/// Save the positions of the Metadata blocks and skip parsing the blocks.
std::error_code rememberAndSkipMetadata();
- std::error_code ParseFunctionBody(Function *F);
- std::error_code GlobalCleanup();
- std::error_code ResolveGlobalAndAliasInits();
- std::error_code ParseMetadata();
- std::error_code ParseMetadataAttachment(Function &F);
+ std::error_code parseFunctionBody(Function *F);
+ std::error_code globalCleanup();
+ std::error_code resolveGlobalAndAliasInits();
+ std::error_code parseMetadata();
+ std::error_code parseMetadataAttachment(Function &F);
ErrorOr<std::string> parseModuleTriple();
- std::error_code ParseUseLists();
- std::error_code InitStream();
- std::error_code InitStreamFromBuffer();
- std::error_code InitLazyStream();
- std::error_code FindFunctionInStream(
+ std::error_code parseUseLists();
+ std::error_code initStream(std::unique_ptr<DataStreamer> Streamer);
+ std::error_code initStreamFromBuffer();
+ std::error_code initLazyStream(std::unique_ptr<DataStreamer> Streamer);
+ std::error_code findFunctionInStream(
Function *F,
DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator);
};
@@ -387,35 +386,35 @@ BitcodeDiagnosticInfo::BitcodeDiagnosticInfo(std::error_code EC,
void BitcodeDiagnosticInfo::print(DiagnosticPrinter &DP) const { DP << Msg; }
-static std::error_code Error(DiagnosticHandlerFunction DiagnosticHandler,
+static std::error_code error(DiagnosticHandlerFunction DiagnosticHandler,
std::error_code EC, const Twine &Message) {
BitcodeDiagnosticInfo DI(EC, DS_Error, Message);
DiagnosticHandler(DI);
return EC;
}
-static std::error_code Error(DiagnosticHandlerFunction DiagnosticHandler,
+static std::error_code error(DiagnosticHandlerFunction DiagnosticHandler,
std::error_code EC) {
- return Error(DiagnosticHandler, EC, EC.message());
+ return error(DiagnosticHandler, EC, EC.message());
}
-static std::error_code Error(DiagnosticHandlerFunction DiagnosticHandler,
+static std::error_code error(DiagnosticHandlerFunction DiagnosticHandler,
const Twine &Message) {
- return Error(DiagnosticHandler,
+ return error(DiagnosticHandler,
make_error_code(BitcodeError::CorruptedBitcode), Message);
}
-std::error_code BitcodeReader::Error(BitcodeError E, const Twine &Message) {
- return ::Error(DiagnosticHandler, make_error_code(E), Message);
+std::error_code BitcodeReader::error(BitcodeError E, const Twine &Message) {
+ return ::error(DiagnosticHandler, make_error_code(E), Message);
}
-std::error_code BitcodeReader::Error(const Twine &Message) {
- return ::Error(DiagnosticHandler,
+std::error_code BitcodeReader::error(const Twine &Message) {
+ return ::error(DiagnosticHandler,
make_error_code(BitcodeError::CorruptedBitcode), Message);
}
-std::error_code BitcodeReader::Error(BitcodeError E) {
- return ::Error(DiagnosticHandler, make_error_code(E));
+std::error_code BitcodeReader::error(BitcodeError E) {
+ return ::error(DiagnosticHandler, make_error_code(E));
}
static DiagnosticHandlerFunction getDiagHandler(DiagnosticHandlerFunction F,
@@ -425,21 +424,19 @@ static DiagnosticHandlerFunction getDiagHandler(DiagnosticHandlerFunction F,
return [&C](const DiagnosticInfo &DI) { C.diagnose(DI); };
}
-BitcodeReader::BitcodeReader(MemoryBuffer *buffer, LLVMContext &C,
+BitcodeReader::BitcodeReader(MemoryBuffer *Buffer, LLVMContext &Context,
DiagnosticHandlerFunction DiagnosticHandler)
- : Context(C), DiagnosticHandler(getDiagHandler(DiagnosticHandler, C)),
- TheModule(nullptr), Buffer(buffer), LazyStreamer(nullptr),
- NextUnreadBit(0), SeenValueSymbolTable(false), ValueList(C),
- MDValueList(C), SeenFirstFunctionBody(false), UseRelativeIDs(false),
- WillMaterializeAllForwardRefs(false), IsMetadataMaterialized(false) {}
+ : Context(Context),
+ DiagnosticHandler(getDiagHandler(DiagnosticHandler, Context)),
+ Buffer(Buffer), IsStreamed(false), ValueList(Context),
+ MDValueList(Context) {}
-BitcodeReader::BitcodeReader(DataStreamer *streamer, LLVMContext &C,
+BitcodeReader::BitcodeReader(LLVMContext &Context,
DiagnosticHandlerFunction DiagnosticHandler)
- : Context(C), DiagnosticHandler(getDiagHandler(DiagnosticHandler, C)),
- TheModule(nullptr), Buffer(nullptr), LazyStreamer(streamer),
- NextUnreadBit(0), SeenValueSymbolTable(false), ValueList(C),
- MDValueList(C), SeenFirstFunctionBody(false), UseRelativeIDs(false),
- WillMaterializeAllForwardRefs(false), IsMetadataMaterialized(false) {}
+ : Context(Context),
+ DiagnosticHandler(getDiagHandler(DiagnosticHandler, Context)),
+ Buffer(nullptr), IsStreamed(true), ValueList(Context),
+ MDValueList(Context) {}
std::error_code BitcodeReader::materializeForwardReferencedFunctions() {
if (WillMaterializeAllForwardRefs)
@@ -461,7 +458,7 @@ std::error_code BitcodeReader::materializeForwardReferencedFunctions() {
// isn't a trivial way to check if a function will have a body without a
// linear search through FunctionsWithBodies, so just check it here.
if (!F->isMaterializable())
- return Error("Never resolved function from blockaddress");
+ return error("Never resolved function from blockaddress");
// Try to materialize F.
if (std::error_code EC = materialize(F))
@@ -474,7 +471,7 @@ std::error_code BitcodeReader::materializeForwardReferencedFunctions() {
return std::error_code();
}
-void BitcodeReader::FreeState() {
+void BitcodeReader::freeState() {
Buffer = nullptr;
std::vector<Type*>().swap(TypeList);
ValueList.clear();
@@ -496,10 +493,9 @@ void BitcodeReader::FreeState() {
// Helper functions to implement forward reference resolution, etc.
//===----------------------------------------------------------------------===//
-/// ConvertToString - Convert a string from a record into an std::string, return
-/// true on failure.
-template<typename StrTy>
-static bool ConvertToString(ArrayRef<uint64_t> Record, unsigned Idx,
+/// Convert a string from a record into an std::string; return true on failure.
+template <typename StrTy>
+static bool convertToString(ArrayRef<uint64_t> Record, unsigned Idx,
StrTy &Result) {
if (Idx > Record.size())
return true;
@@ -563,7 +559,7 @@ static GlobalValue::LinkageTypes getDecodedLinkage(unsigned Val) {
}
}
-static GlobalValue::VisibilityTypes GetDecodedVisibility(unsigned Val) {
+static GlobalValue::VisibilityTypes getDecodedVisibility(unsigned Val) {
switch (Val) {
default: // Map unknown visibilities to default.
case 0: return GlobalValue::DefaultVisibility;
@@ -573,7 +569,7 @@ static GlobalValue::VisibilityTypes GetDecodedVisibility(unsigned Val) {
}
static GlobalValue::DLLStorageClassTypes
-GetDecodedDLLStorageClass(unsigned Val) {
+getDecodedDLLStorageClass(unsigned Val) {
switch (Val) {
default: // Map unknown values to default.
case 0: return GlobalValue::DefaultStorageClass;
@@ -582,7 +578,7 @@ GetDecodedDLLStorageClass(unsigned Val) {
}
}
-static GlobalVariable::ThreadLocalMode GetDecodedThreadLocalMode(unsigned Val) {
+static GlobalVariable::ThreadLocalMode getDecodedThreadLocalMode(unsigned Val) {
switch (Val) {
case 0: return GlobalVariable::NotThreadLocal;
default: // Map unknown non-zero value to general dynamic.
@@ -593,7 +589,7 @@ static GlobalVariable::ThreadLocalMode GetDecodedThreadLocalMode(unsigned Val) {
}
}
-static int GetDecodedCastOpcode(unsigned Val) {
+static int getDecodedCastOpcode(unsigned Val) {
switch (Val) {
default: return -1;
case bitc::CAST_TRUNC : return Instruction::Trunc;
@@ -612,7 +608,7 @@ static int GetDecodedCastOpcode(unsigned Val) {
}
}
-static int GetDecodedBinaryOpcode(unsigned Val, Type *Ty) {
+static int getDecodedBinaryOpcode(unsigned Val, Type *Ty) {
bool IsFP = Ty->isFPOrFPVectorTy();
// BinOps are only valid for int/fp or vector of int/fp types
if (!IsFP && !Ty->isIntOrIntVectorTy())
@@ -650,7 +646,7 @@ static int GetDecodedBinaryOpcode(unsigned Val, Type *Ty) {
}
}
-static AtomicRMWInst::BinOp GetDecodedRMWOperation(unsigned Val) {
+static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) {
switch (Val) {
default: return AtomicRMWInst::BAD_BINOP;
case bitc::RMW_XCHG: return AtomicRMWInst::Xchg;
@@ -667,7 +663,7 @@ static AtomicRMWInst::BinOp GetDecodedRMWOperation(unsigned Val) {
}
}
-static AtomicOrdering GetDecodedOrdering(unsigned Val) {
+static AtomicOrdering getDecodedOrdering(unsigned Val) {
switch (Val) {
case bitc::ORDERING_NOTATOMIC: return NotAtomic;
case bitc::ORDERING_UNORDERED: return Unordered;
@@ -680,7 +676,7 @@ static AtomicOrdering GetDecodedOrdering(unsigned Val) {
}
}
-static SynchronizationScope GetDecodedSynchScope(unsigned Val) {
+static SynchronizationScope getDecodedSynchScope(unsigned Val) {
switch (Val) {
case bitc::SYNCHSCOPE_SINGLETHREAD: return SingleThread;
default: // Map unknown scopes to cross-thread.
@@ -704,7 +700,7 @@ static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) {
}
}
-static void UpgradeDLLImportExportLinkage(llvm::GlobalValue *GV, unsigned Val) {
+static void upgradeDLLImportExportLinkage(llvm::GlobalValue *GV, unsigned Val) {
switch (Val) {
case 5: GV->setDLLStorageClass(GlobalValue::DLLImportStorageClass); break;
case 6: GV->setDLLStorageClass(GlobalValue::DLLExportStorageClass); break;
@@ -713,31 +709,29 @@ static void UpgradeDLLImportExportLinkage(llvm::GlobalValue *GV, unsigned Val) {
namespace llvm {
namespace {
- /// @brief A class for maintaining the slot number definition
- /// as a placeholder for the actual definition for forward constants defs.
- class ConstantPlaceHolder : public ConstantExpr {
- void operator=(const ConstantPlaceHolder &) = delete;
- public:
- // allocate space for exactly one operand
- void *operator new(size_t s) {
- return User::operator new(s, 1);
- }
- explicit ConstantPlaceHolder(Type *Ty, LLVMContext& Context)
- : ConstantExpr(Ty, Instruction::UserOp1, &Op<0>(), 1) {
- Op<0>() = UndefValue::get(Type::getInt32Ty(Context));
- }
+/// \brief A class for maintaining the slot number definition
+/// as a placeholder for the actual definition of forward constant defs.
+class ConstantPlaceHolder : public ConstantExpr {
+ void operator=(const ConstantPlaceHolder &) = delete;
- /// @brief Methods to support type inquiry through isa, cast, and dyn_cast.
- static bool classof(const Value *V) {
- return isa<ConstantExpr>(V) &&
- cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1;
- }
+public:
+ // allocate space for exactly one operand
+ void *operator new(size_t s) { return User::operator new(s, 1); }
+ explicit ConstantPlaceHolder(Type *Ty, LLVMContext &Context)
+ : ConstantExpr(Ty, Instruction::UserOp1, &Op<0>(), 1) {
+ Op<0>() = UndefValue::get(Type::getInt32Ty(Context));
+ }
+ /// \brief Methods to support type inquiry through isa, cast, and dyn_cast.
+ static bool classof(const Value *V) {
+ return isa<ConstantExpr>(V) &&
+ cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1;
+ }
- /// Provide fast operand accessors
- DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
- };
-}
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+};
+} // namespace
// FIXME: can we inherit this from ConstantExpr?
template <>
@@ -745,10 +739,9 @@ struct OperandTraits<ConstantPlaceHolder> :
public FixedNumOperandTraits<ConstantPlaceHolder, 1> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantPlaceHolder, Value)
-}
+} // namespace llvm
-
-void BitcodeReaderValueList::AssignValue(Value *V, unsigned Idx) {
+void BitcodeReaderValueList::assignValue(Value *V, unsigned Idx) {
if (Idx == size()) {
push_back(V);
return;
@@ -818,14 +811,13 @@ Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty) {
return V;
}
-/// ResolveConstantForwardRefs - Once all constants are read, this method bulk
-/// resolves any forward references. The idea behind this is that we sometimes
-/// get constants (such as large arrays) which reference *many* forward ref
-/// constants. Replacing each of these causes a lot of thrashing when
-/// building/reuniquing the constant. Instead of doing this, we look at all the
-/// uses and rewrite all the place holders at once for any constant that uses
-/// a placeholder.
-void BitcodeReaderValueList::ResolveConstantForwardRefs() {
+/// Once all constants are read, this method bulk resolves any forward
+/// references. The idea behind this is that we sometimes get constants (such
+/// as large arrays) which reference *many* forward ref constants. Replacing
+/// each of these causes a lot of thrashing when building/reuniquing the
+/// constant. Instead of doing this, we look at all the uses and rewrite all
+/// the placeholders at once for any constant that uses a placeholder.
+void BitcodeReaderValueList::resolveConstantForwardRefs() {
// Sort the values by-pointer so that they are efficient to look up with a
// binary search.
std::sort(ResolveConstants.begin(), ResolveConstants.end());
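
// Toy version of the strategy the comment above describes: sort the
// (placeholder, slot) pairs once, then each forward reference is resolved
// with a binary search instead of a scan. Integer keys stand in for the
// placeholder Constant pointers.
#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>

int main() {
  using Entry = std::pair<unsigned, unsigned>; // (placeholder key, slot)
  std::vector<Entry> ResolveConstants = {{30, 2}, {10, 0}, {20, 1}};

  std::sort(ResolveConstants.begin(), ResolveConstants.end());

  // Resolve the use of placeholder 20 in O(log n).
  auto It = std::lower_bound(ResolveConstants.begin(), ResolveConstants.end(),
                             Entry(20, 0));
  assert(It != ResolveConstants.end() && It->first == 20 && It->second == 1);
  return 0;
}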
@@ -900,7 +892,7 @@ void BitcodeReaderValueList::ResolveConstantForwardRefs() {
}
}
-void BitcodeReaderMDValueList::AssignValue(Metadata *MD, unsigned Idx) {
+void BitcodeReaderMDValueList::assignValue(Metadata *MD, unsigned Idx) {
if (Idx == size()) {
push_back(MD);
return;
@@ -1019,12 +1011,12 @@ static void decodeLLVMAttributesForBitcode(AttrBuilder &B,
(EncodedAttrs & 0xffff));
}
-std::error_code BitcodeReader::ParseAttributeBlock() {
+std::error_code BitcodeReader::parseAttributeBlock() {
if (Stream.EnterSubBlock(bitc::PARAMATTR_BLOCK_ID))
- return Error("Invalid record");
+ return error("Invalid record");
if (!MAttributes.empty())
- return Error("Invalid multiple blocks");
+ return error("Invalid multiple blocks");
SmallVector<uint64_t, 64> Record;
@@ -1037,7 +1029,7 @@ std::error_code BitcodeReader::ParseAttributeBlock() {
switch (Entry.Kind) {
case BitstreamEntry::SubBlock: // Handled for us already.
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
return std::error_code();
case BitstreamEntry::Record:
@@ -1053,7 +1045,7 @@ std::error_code BitcodeReader::ParseAttributeBlock() {
case bitc::PARAMATTR_CODE_ENTRY_OLD: { // ENTRY: [paramidx0, attr0, ...]
// FIXME: Remove in 4.0.
if (Record.size() & 1)
- return Error("Invalid record");
+ return error("Invalid record");
for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
AttrBuilder B;
@@ -1078,7 +1070,7 @@ std::error_code BitcodeReader::ParseAttributeBlock() {
}
// Returns Attribute::None on unrecognized codes.
-static Attribute::AttrKind GetAttrFromCode(uint64_t Code) {
+static Attribute::AttrKind getAttrFromCode(uint64_t Code) {
switch (Code) {
default:
return Attribute::None;
@@ -1156,6 +1148,8 @@ static Attribute::AttrKind GetAttrFromCode(uint64_t Code) {
return Attribute::StackProtectReq;
case bitc::ATTR_KIND_STACK_PROTECT_STRONG:
return Attribute::StackProtectStrong;
+ case bitc::ATTR_KIND_SAFESTACK:
+ return Attribute::SafeStack;
case bitc::ATTR_KIND_STRUCT_RET:
return Attribute::StructRet;
case bitc::ATTR_KIND_SANITIZE_ADDRESS:
@@ -1176,26 +1170,26 @@ std::error_code BitcodeReader::parseAlignmentValue(uint64_t Exponent,
// Note: Alignment in bitcode files is incremented by 1, so that zero
// can be used for default alignment.
if (Exponent > Value::MaxAlignmentExponent + 1)
- return Error("Invalid alignment value");
+ return error("Invalid alignment value");
Alignment = (1 << static_cast<unsigned>(Exponent)) >> 1;
return std::error_code();
}
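
// Quick check of the decoding just above: the file stores log2(align) + 1 so
// that 0 can mean "default alignment", and (1 << Exponent) >> 1 recovers the
// byte alignment.
#include <cassert>

int main() {
  auto decode = [](unsigned Exponent) { return (1u << Exponent) >> 1; };
  assert(decode(0) == 0); // 0 encodes "use default alignment"
  assert(decode(1) == 1); // align 1
  assert(decode(4) == 8); // align 8, i.e. 2^(4-1)
  return 0;
}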
-std::error_code BitcodeReader::ParseAttrKind(uint64_t Code,
+std::error_code BitcodeReader::parseAttrKind(uint64_t Code,
Attribute::AttrKind *Kind) {
- *Kind = GetAttrFromCode(Code);
+ *Kind = getAttrFromCode(Code);
if (*Kind == Attribute::None)
- return Error(BitcodeError::CorruptedBitcode,
+ return error(BitcodeError::CorruptedBitcode,
"Unknown attribute kind (" + Twine(Code) + ")");
return std::error_code();
}
-std::error_code BitcodeReader::ParseAttributeGroupBlock() {
+std::error_code BitcodeReader::parseAttributeGroupBlock() {
if (Stream.EnterSubBlock(bitc::PARAMATTR_GROUP_BLOCK_ID))
- return Error("Invalid record");
+ return error("Invalid record");
if (!MAttributeGroups.empty())
- return Error("Invalid multiple blocks");
+ return error("Invalid multiple blocks");
SmallVector<uint64_t, 64> Record;
@@ -1206,7 +1200,7 @@ std::error_code BitcodeReader::ParseAttributeGroupBlock() {
switch (Entry.Kind) {
case BitstreamEntry::SubBlock: // Handled for us already.
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
return std::error_code();
case BitstreamEntry::Record:
@@ -1221,7 +1215,7 @@ std::error_code BitcodeReader::ParseAttributeGroupBlock() {
break;
case bitc::PARAMATTR_GRP_CODE_ENTRY: { // ENTRY: [grpid, idx, a0, a1, ...]
if (Record.size() < 3)
- return Error("Invalid record");
+ return error("Invalid record");
uint64_t GrpID = Record[0];
uint64_t Idx = Record[1]; // Index of the object this attribute refers to.
@@ -1230,13 +1224,13 @@ std::error_code BitcodeReader::ParseAttributeGroupBlock() {
for (unsigned i = 2, e = Record.size(); i != e; ++i) {
if (Record[i] == 0) { // Enum attribute
Attribute::AttrKind Kind;
- if (std::error_code EC = ParseAttrKind(Record[++i], &Kind))
+ if (std::error_code EC = parseAttrKind(Record[++i], &Kind))
return EC;
B.addAttribute(Kind);
} else if (Record[i] == 1) { // Integer attribute
Attribute::AttrKind Kind;
- if (std::error_code EC = ParseAttrKind(Record[++i], &Kind))
+ if (std::error_code EC = parseAttrKind(Record[++i], &Kind))
return EC;
if (Kind == Attribute::Alignment)
B.addAlignmentAttr(Record[++i]);
@@ -1276,16 +1270,16 @@ std::error_code BitcodeReader::ParseAttributeGroupBlock() {
}
}
-std::error_code BitcodeReader::ParseTypeTable() {
+std::error_code BitcodeReader::parseTypeTable() {
if (Stream.EnterSubBlock(bitc::TYPE_BLOCK_ID_NEW))
- return Error("Invalid record");
+ return error("Invalid record");
- return ParseTypeTableBody();
+ return parseTypeTableBody();
}
-std::error_code BitcodeReader::ParseTypeTableBody() {
+std::error_code BitcodeReader::parseTypeTableBody() {
if (!TypeList.empty())
- return Error("Invalid multiple blocks");
+ return error("Invalid multiple blocks");
SmallVector<uint64_t, 64> Record;
unsigned NumRecords = 0;
@@ -1299,10 +1293,10 @@ std::error_code BitcodeReader::ParseTypeTableBody() {
switch (Entry.Kind) {
case BitstreamEntry::SubBlock: // Handled for us already.
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
if (NumRecords != TypeList.size())
- return Error("Malformed block");
+ return error("Malformed block");
return std::error_code();
case BitstreamEntry::Record:
// The interesting case.
@@ -1314,12 +1308,12 @@ std::error_code BitcodeReader::ParseTypeTableBody() {
Type *ResultTy = nullptr;
switch (Stream.readRecord(Entry.ID, Record)) {
default:
- return Error("Invalid value");
+ return error("Invalid value");
case bitc::TYPE_CODE_NUMENTRY: // TYPE_CODE_NUMENTRY: [numentries]
// TYPE_CODE_NUMENTRY contains a count of the number of types in the
// type list. This allows us to reserve space.
if (Record.size() < 1)
- return Error("Invalid record");
+ return error("Invalid record");
TypeList.resize(Record[0]);
continue;
case bitc::TYPE_CODE_VOID: // VOID
@@ -1354,26 +1348,26 @@ std::error_code BitcodeReader::ParseTypeTableBody() {
break;
case bitc::TYPE_CODE_INTEGER: { // INTEGER: [width]
if (Record.size() < 1)
- return Error("Invalid record");
+ return error("Invalid record");
uint64_t NumBits = Record[0];
if (NumBits < IntegerType::MIN_INT_BITS ||
NumBits > IntegerType::MAX_INT_BITS)
- return Error("Bitwidth for integer type out of range");
+ return error("Bitwidth for integer type out of range");
ResultTy = IntegerType::get(Context, NumBits);
break;
}
case bitc::TYPE_CODE_POINTER: { // POINTER: [pointee type] or
// [pointee type, address space]
if (Record.size() < 1)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned AddressSpace = 0;
if (Record.size() == 2)
AddressSpace = Record[1];
ResultTy = getTypeByID(Record[0]);
if (!ResultTy ||
!PointerType::isValidElementType(ResultTy))
- return Error("Invalid type");
+ return error("Invalid type");
ResultTy = PointerType::get(ResultTy, AddressSpace);
break;
}
@@ -1381,7 +1375,7 @@ std::error_code BitcodeReader::ParseTypeTableBody() {
// FIXME: attrid is dead, remove it in LLVM 4.0
// FUNCTION: [vararg, attrid, retty, paramty x N]
if (Record.size() < 3)
- return Error("Invalid record");
+ return error("Invalid record");
SmallVector<Type*, 8> ArgTys;
for (unsigned i = 3, e = Record.size(); i != e; ++i) {
if (Type *T = getTypeByID(Record[i]))
@@ -1392,7 +1386,7 @@ std::error_code BitcodeReader::ParseTypeTableBody() {
ResultTy = getTypeByID(Record[2]);
if (!ResultTy || ArgTys.size() < Record.size()-3)
- return Error("Invalid type");
+ return error("Invalid type");
ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
break;
@@ -1400,12 +1394,12 @@ std::error_code BitcodeReader::ParseTypeTableBody() {
case bitc::TYPE_CODE_FUNCTION: {
// FUNCTION: [vararg, retty, paramty x N]
if (Record.size() < 2)
- return Error("Invalid record");
+ return error("Invalid record");
SmallVector<Type*, 8> ArgTys;
for (unsigned i = 2, e = Record.size(); i != e; ++i) {
if (Type *T = getTypeByID(Record[i])) {
if (!FunctionType::isValidArgumentType(T))
- return Error("Invalid function argument type");
+ return error("Invalid function argument type");
ArgTys.push_back(T);
}
else
@@ -1414,14 +1408,14 @@ std::error_code BitcodeReader::ParseTypeTableBody() {
ResultTy = getTypeByID(Record[1]);
if (!ResultTy || ArgTys.size() < Record.size()-2)
- return Error("Invalid type");
+ return error("Invalid type");
ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
break;
}
case bitc::TYPE_CODE_STRUCT_ANON: { // STRUCT: [ispacked, eltty x N]
if (Record.size() < 1)
- return Error("Invalid record");
+ return error("Invalid record");
SmallVector<Type*, 8> EltTys;
for (unsigned i = 1, e = Record.size(); i != e; ++i) {
if (Type *T = getTypeByID(Record[i]))
@@ -1430,21 +1424,21 @@ std::error_code BitcodeReader::ParseTypeTableBody() {
break;
}
if (EltTys.size() != Record.size()-1)
- return Error("Invalid type");
+ return error("Invalid type");
ResultTy = StructType::get(Context, EltTys, Record[0]);
break;
}
case bitc::TYPE_CODE_STRUCT_NAME: // STRUCT_NAME: [strchr x N]
- if (ConvertToString(Record, 0, TypeName))
- return Error("Invalid record");
+ if (convertToString(Record, 0, TypeName))
+ return error("Invalid record");
continue;
case bitc::TYPE_CODE_STRUCT_NAMED: { // STRUCT: [ispacked, eltty x N]
if (Record.size() < 1)
- return Error("Invalid record");
+ return error("Invalid record");
if (NumRecords >= TypeList.size())
- return Error("Invalid TYPE table");
+ return error("Invalid TYPE table");
// Check to see if this was forward referenced, if so fill in the temp.
StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
@@ -1463,17 +1457,17 @@ std::error_code BitcodeReader::ParseTypeTableBody() {
break;
}
if (EltTys.size() != Record.size()-1)
- return Error("Invalid record");
+ return error("Invalid record");
Res->setBody(EltTys, Record[0]);
ResultTy = Res;
break;
}
case bitc::TYPE_CODE_OPAQUE: { // OPAQUE: []
if (Record.size() != 1)
- return Error("Invalid record");
+ return error("Invalid record");
if (NumRecords >= TypeList.size())
- return Error("Invalid TYPE table");
+ return error("Invalid TYPE table");
// Check to see if this was forward referenced, if so fill in the temp.
StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
@@ -1488,37 +1482,37 @@ std::error_code BitcodeReader::ParseTypeTableBody() {
}
case bitc::TYPE_CODE_ARRAY: // ARRAY: [numelts, eltty]
if (Record.size() < 2)
- return Error("Invalid record");
+ return error("Invalid record");
ResultTy = getTypeByID(Record[1]);
if (!ResultTy || !ArrayType::isValidElementType(ResultTy))
- return Error("Invalid type");
+ return error("Invalid type");
ResultTy = ArrayType::get(ResultTy, Record[0]);
break;
case bitc::TYPE_CODE_VECTOR: // VECTOR: [numelts, eltty]
if (Record.size() < 2)
- return Error("Invalid record");
+ return error("Invalid record");
if (Record[0] == 0)
- return Error("Invalid vector length");
+ return error("Invalid vector length");
ResultTy = getTypeByID(Record[1]);
if (!ResultTy || !StructType::isValidElementType(ResultTy))
- return Error("Invalid type");
+ return error("Invalid type");
ResultTy = VectorType::get(ResultTy, Record[0]);
break;
}
if (NumRecords >= TypeList.size())
- return Error("Invalid TYPE table");
+ return error("Invalid TYPE table");
if (TypeList[NumRecords])
- return Error(
+ return error(
"Invalid TYPE table: Only named structs can be forward referenced");
assert(ResultTy && "Didn't read a type?");
TypeList[NumRecords++] = ResultTy;
}
}
-std::error_code BitcodeReader::ParseValueSymbolTable() {
+std::error_code BitcodeReader::parseValueSymbolTable() {
if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
- return Error("Invalid record");
+ return error("Invalid record");
SmallVector<uint64_t, 64> Record;
@@ -1532,7 +1526,7 @@ std::error_code BitcodeReader::ParseValueSymbolTable() {
switch (Entry.Kind) {
case BitstreamEntry::SubBlock: // Handled for us already.
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
return std::error_code();
case BitstreamEntry::Record:
@@ -1546,11 +1540,11 @@ std::error_code BitcodeReader::ParseValueSymbolTable() {
default: // Default behavior: unknown type.
break;
case bitc::VST_CODE_ENTRY: { // VST_ENTRY: [valueid, namechar x N]
- if (ConvertToString(Record, 1, ValueName))
- return Error("Invalid record");
+ if (convertToString(Record, 1, ValueName))
+ return error("Invalid record");
unsigned ValueID = Record[0];
if (ValueID >= ValueList.size() || !ValueList[ValueID])
- return Error("Invalid record");
+ return error("Invalid record");
Value *V = ValueList[ValueID];
V->setName(StringRef(ValueName.data(), ValueName.size()));
@@ -1566,11 +1560,11 @@ std::error_code BitcodeReader::ParseValueSymbolTable() {
break;
}
case bitc::VST_CODE_BBENTRY: {
- if (ConvertToString(Record, 1, ValueName))
- return Error("Invalid record");
+ if (convertToString(Record, 1, ValueName))
+ return error("Invalid record");
BasicBlock *BB = getBasicBlock(Record[0]);
if (!BB)
- return Error("Invalid record");
+ return error("Invalid record");
BB->setName(StringRef(ValueName.data(), ValueName.size()));
ValueName.clear();
@@ -1582,12 +1576,12 @@ std::error_code BitcodeReader::ParseValueSymbolTable() {
static int64_t unrotateSign(uint64_t U) { return U & 1 ? ~(U >> 1) : U >> 1; }
-std::error_code BitcodeReader::ParseMetadata() {
+std::error_code BitcodeReader::parseMetadata() {
IsMetadataMaterialized = true;
unsigned NextMDValueNo = MDValueList.size();
if (Stream.EnterSubBlock(bitc::METADATA_BLOCK_ID))
- return Error("Invalid record");
+ return error("Invalid record");
SmallVector<uint64_t, 64> Record;
@@ -1614,7 +1608,7 @@ std::error_code BitcodeReader::ParseMetadata() {
switch (Entry.Kind) {
case BitstreamEntry::SubBlock: // Handled for us already.
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
MDValueList.tryToResolveCycles();
return std::error_code();
@@ -1638,7 +1632,7 @@ std::error_code BitcodeReader::ParseMetadata() {
unsigned NextBitCode = Stream.readRecord(Code, Record);
if (NextBitCode != bitc::METADATA_NAMED_NODE)
- return Error("METADATA_NAME not followed by METADATA_NAMED_NODE");
+ return error("METADATA_NAME not followed by METADATA_NAMED_NODE");
// Read named metadata elements.
unsigned Size = Record.size();
@@ -1646,7 +1640,7 @@ std::error_code BitcodeReader::ParseMetadata() {
for (unsigned i = 0; i != Size; ++i) {
MDNode *MD = dyn_cast_or_null<MDNode>(MDValueList.getValueFwdRef(Record[i]));
if (!MD)
- return Error("Invalid record");
+ return error("Invalid record");
NMD->addOperand(MD);
}
break;
@@ -1656,12 +1650,12 @@ std::error_code BitcodeReader::ParseMetadata() {
// This is a LocalAsMetadata record, the only type of function-local
// metadata.
if (Record.size() % 2 == 1)
- return Error("Invalid record");
+ return error("Invalid record");
// If this isn't a LocalAsMetadata record, we're dropping it. This used
// to be legal, but there's no upgrade path.
auto dropRecord = [&] {
- MDValueList.AssignValue(MDNode::get(Context, None), NextMDValueNo++);
+ MDValueList.assignValue(MDNode::get(Context, None), NextMDValueNo++);
};
if (Record.size() != 2) {
dropRecord();
@@ -1674,7 +1668,7 @@ std::error_code BitcodeReader::ParseMetadata() {
break;
}
- MDValueList.AssignValue(
+ MDValueList.assignValue(
LocalAsMetadata::get(ValueList.getValueFwdRef(Record[1], Ty)),
NextMDValueNo++);
break;
@@ -1682,14 +1676,14 @@ std::error_code BitcodeReader::ParseMetadata() {
case bitc::METADATA_OLD_NODE: {
// FIXME: Remove in 4.0.
if (Record.size() % 2 == 1)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned Size = Record.size();
SmallVector<Metadata *, 8> Elts;
for (unsigned i = 0; i != Size; i += 2) {
Type *Ty = getTypeByID(Record[i]);
if (!Ty)
- return Error("Invalid record");
+ return error("Invalid record");
if (Ty->isMetadataTy())
Elts.push_back(MDValueList.getValueFwdRef(Record[i+1]));
else if (!Ty->isVoidTy()) {
@@ -1701,18 +1695,18 @@ std::error_code BitcodeReader::ParseMetadata() {
} else
Elts.push_back(nullptr);
}
- MDValueList.AssignValue(MDNode::get(Context, Elts), NextMDValueNo++);
+ MDValueList.assignValue(MDNode::get(Context, Elts), NextMDValueNo++);
break;
}
case bitc::METADATA_VALUE: {
if (Record.size() != 2)
- return Error("Invalid record");
+ return error("Invalid record");
Type *Ty = getTypeByID(Record[0]);
if (Ty->isMetadataTy() || Ty->isVoidTy())
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
ValueAsMetadata::get(ValueList.getValueFwdRef(Record[1], Ty)),
NextMDValueNo++);
break;
@@ -1725,21 +1719,21 @@ std::error_code BitcodeReader::ParseMetadata() {
Elts.reserve(Record.size());
for (unsigned ID : Record)
Elts.push_back(ID ? MDValueList.getValueFwdRef(ID - 1) : nullptr);
- MDValueList.AssignValue(IsDistinct ? MDNode::getDistinct(Context, Elts)
+ MDValueList.assignValue(IsDistinct ? MDNode::getDistinct(Context, Elts)
: MDNode::get(Context, Elts),
NextMDValueNo++);
break;
}
case bitc::METADATA_LOCATION: {
if (Record.size() != 5)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned Line = Record[1];
unsigned Column = Record[2];
MDNode *Scope = cast<MDNode>(MDValueList.getValueFwdRef(Record[3]));
Metadata *InlinedAt =
Record[4] ? MDValueList.getValueFwdRef(Record[4] - 1) : nullptr;
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DILocation, Record[0],
(Context, Line, Column, Scope, InlinedAt)),
NextMDValueNo++);
@@ -1747,29 +1741,29 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_GENERIC_DEBUG: {
if (Record.size() < 4)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned Tag = Record[1];
unsigned Version = Record[2];
if (Tag >= 1u << 16 || Version != 0)
- return Error("Invalid record");
+ return error("Invalid record");
auto *Header = getMDString(Record[3]);
SmallVector<Metadata *, 8> DwarfOps;
for (unsigned I = 4, E = Record.size(); I != E; ++I)
DwarfOps.push_back(Record[I] ? MDValueList.getValueFwdRef(Record[I] - 1)
: nullptr);
- MDValueList.AssignValue(GET_OR_DISTINCT(GenericDINode, Record[0],
+ MDValueList.assignValue(GET_OR_DISTINCT(GenericDINode, Record[0],
(Context, Tag, Header, DwarfOps)),
NextMDValueNo++);
break;
}
case bitc::METADATA_SUBRANGE: {
if (Record.size() != 3)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DISubrange, Record[0],
(Context, Record[1], unrotateSign(Record[2]))),
NextMDValueNo++);
@@ -1777,9 +1771,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_ENUMERATOR: {
if (Record.size() != 3)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(GET_OR_DISTINCT(DIEnumerator, Record[0],
+ MDValueList.assignValue(GET_OR_DISTINCT(DIEnumerator, Record[0],
(Context, unrotateSign(Record[1]),
getMDString(Record[2]))),
NextMDValueNo++);
@@ -1787,9 +1781,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_BASIC_TYPE: {
if (Record.size() != 6)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DIBasicType, Record[0],
(Context, Record[1], getMDString(Record[2]),
Record[3], Record[4], Record[5])),
@@ -1798,9 +1792,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_DERIVED_TYPE: {
if (Record.size() != 12)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DIDerivedType, Record[0],
(Context, Record[1], getMDString(Record[2]),
getMDOrNull(Record[3]), Record[4],
@@ -1812,9 +1806,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_COMPOSITE_TYPE: {
if (Record.size() != 16)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DICompositeType, Record[0],
(Context, Record[1], getMDString(Record[2]),
getMDOrNull(Record[3]), Record[4],
@@ -1828,9 +1822,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_SUBROUTINE_TYPE: {
if (Record.size() != 3)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DISubroutineType, Record[0],
(Context, Record[1], getMDOrNull(Record[2]))),
NextMDValueNo++);
@@ -1838,9 +1832,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_FILE: {
if (Record.size() != 3)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DIFile, Record[0], (Context, getMDString(Record[1]),
getMDString(Record[2]))),
NextMDValueNo++);
@@ -1848,26 +1842,25 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_COMPILE_UNIT: {
if (Record.size() < 14 || Record.size() > 15)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
- GET_OR_DISTINCT(DICompileUnit, Record[0],
- (Context, Record[1], getMDOrNull(Record[2]),
- getMDString(Record[3]), Record[4],
- getMDString(Record[5]), Record[6],
- getMDString(Record[7]), Record[8],
- getMDOrNull(Record[9]), getMDOrNull(Record[10]),
- getMDOrNull(Record[11]), getMDOrNull(Record[12]),
- getMDOrNull(Record[13]),
- Record.size() == 14 ? 0 : Record[14])),
+ MDValueList.assignValue(
+ GET_OR_DISTINCT(
+ DICompileUnit, Record[0],
+ (Context, Record[1], getMDOrNull(Record[2]),
+ getMDString(Record[3]), Record[4], getMDString(Record[5]),
+ Record[6], getMDString(Record[7]), Record[8],
+ getMDOrNull(Record[9]), getMDOrNull(Record[10]),
+ getMDOrNull(Record[11]), getMDOrNull(Record[12]),
+ getMDOrNull(Record[13]), Record.size() == 14 ? 0 : Record[14])),
NextMDValueNo++);
break;
}
case bitc::METADATA_SUBPROGRAM: {
if (Record.size() != 19)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(
DISubprogram, Record[0],
(Context, getMDOrNull(Record[1]), getMDString(Record[2]),
@@ -1881,9 +1874,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_LEXICAL_BLOCK: {
if (Record.size() != 5)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DILexicalBlock, Record[0],
(Context, getMDOrNull(Record[1]),
getMDOrNull(Record[2]), Record[3], Record[4])),
@@ -1892,9 +1885,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_LEXICAL_BLOCK_FILE: {
if (Record.size() != 4)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DILexicalBlockFile, Record[0],
(Context, getMDOrNull(Record[1]),
getMDOrNull(Record[2]), Record[3])),
@@ -1903,9 +1896,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_NAMESPACE: {
if (Record.size() != 5)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DINamespace, Record[0],
(Context, getMDOrNull(Record[1]),
getMDOrNull(Record[2]), getMDString(Record[3]),
@@ -1915,9 +1908,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_TEMPLATE_TYPE: {
if (Record.size() != 3)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(GET_OR_DISTINCT(DITemplateTypeParameter,
+ MDValueList.assignValue(GET_OR_DISTINCT(DITemplateTypeParameter,
Record[0],
(Context, getMDString(Record[1]),
getMDOrNull(Record[2]))),
@@ -1926,9 +1919,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_TEMPLATE_VALUE: {
if (Record.size() != 5)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DITemplateValueParameter, Record[0],
(Context, Record[1], getMDString(Record[2]),
getMDOrNull(Record[3]), getMDOrNull(Record[4]))),
@@ -1937,9 +1930,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_GLOBAL_VAR: {
if (Record.size() != 11)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DIGlobalVariable, Record[0],
(Context, getMDOrNull(Record[1]),
getMDString(Record[2]), getMDString(Record[3]),
@@ -1952,9 +1945,9 @@ std::error_code BitcodeReader::ParseMetadata() {
case bitc::METADATA_LOCAL_VAR: {
      // 10th field is for the obsoleted 'inlinedAt:' field.
if (Record.size() != 9 && Record.size() != 10)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DILocalVariable, Record[0],
(Context, Record[1], getMDOrNull(Record[2]),
getMDString(Record[3]), getMDOrNull(Record[4]),
@@ -1965,9 +1958,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_EXPRESSION: {
if (Record.size() < 1)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DIExpression, Record[0],
(Context, makeArrayRef(Record).slice(1))),
NextMDValueNo++);
@@ -1975,9 +1968,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_OBJC_PROPERTY: {
if (Record.size() != 8)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DIObjCProperty, Record[0],
(Context, getMDString(Record[1]),
getMDOrNull(Record[2]), Record[3],
@@ -1988,9 +1981,9 @@ std::error_code BitcodeReader::ParseMetadata() {
}
case bitc::METADATA_IMPORTED_ENTITY: {
if (Record.size() != 6)
- return Error("Invalid record");
+ return error("Invalid record");
- MDValueList.AssignValue(
+ MDValueList.assignValue(
GET_OR_DISTINCT(DIImportedEntity, Record[0],
(Context, Record[1], getMDOrNull(Record[2]),
getMDOrNull(Record[3]), Record[4],
@@ -2002,19 +1995,19 @@ std::error_code BitcodeReader::ParseMetadata() {
std::string String(Record.begin(), Record.end());
llvm::UpgradeMDStringConstant(String);
Metadata *MD = MDString::get(Context, String);
- MDValueList.AssignValue(MD, NextMDValueNo++);
+ MDValueList.assignValue(MD, NextMDValueNo++);
break;
}
case bitc::METADATA_KIND: {
if (Record.size() < 2)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned Kind = Record[0];
SmallString<8> Name(Record.begin()+1, Record.end());
unsigned NewKind = TheModule->getMDKindID(Name.str());
if (!MDKindMap.insert(std::make_pair(Kind, NewKind)).second)
- return Error("Conflicting METADATA_KIND records");
+ return error("Conflicting METADATA_KIND records");
break;
}
}
@@ -2022,8 +2015,8 @@ std::error_code BitcodeReader::ParseMetadata() {
#undef GET_OR_DISTINCT
}
-/// decodeSignRotatedValue - Decode a signed value stored with the sign bit in
-/// the LSB for dense VBR encoding.
+/// Decode a signed value stored with the sign bit in the LSB for dense VBR
+/// encoding.
uint64_t BitcodeReader::decodeSignRotatedValue(uint64_t V) {
if ((V & 1) == 0)
return V >> 1;
@@ -2033,18 +2026,19 @@ uint64_t BitcodeReader::decodeSignRotatedValue(uint64_t V) {
return 1ULL << 63;
}
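
// Worked example of the sign-rotation scheme: a non-negative n is stored as
// 2n and a negative n as 2|n| + 1, so the sign sits in the LSB and small
// magnitudes stay small under VBR. decode() mirrors decodeSignRotatedValue
// above.
#include <cassert>
#include <cstdint>

static uint64_t decode(uint64_t V) {
  if ((V & 1) == 0)
    return V >> 1;    // even: non-negative payload
  if (V != 1)
    return -(V >> 1); // odd: negate the payload
  return 1ULL << 63;  // V == 1 encodes INT64_MIN, which has no positive twin
}

int main() {
  assert((int64_t)decode(6) == 3);  //  3 -> 2*3     = 6
  assert((int64_t)decode(7) == -3); // -3 -> 2*3 + 1 = 7
  assert((int64_t)decode(1) == INT64_MIN);
  return 0;
}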
-/// ResolveGlobalAndAliasInits - Resolve all of the initializers for global
-/// values and aliases that we can.
-std::error_code BitcodeReader::ResolveGlobalAndAliasInits() {
+/// Resolve all of the initializers for global values and aliases that we can.
+std::error_code BitcodeReader::resolveGlobalAndAliasInits() {
std::vector<std::pair<GlobalVariable*, unsigned> > GlobalInitWorklist;
std::vector<std::pair<GlobalAlias*, unsigned> > AliasInitWorklist;
std::vector<std::pair<Function*, unsigned> > FunctionPrefixWorklist;
std::vector<std::pair<Function*, unsigned> > FunctionPrologueWorklist;
+ std::vector<std::pair<Function*, unsigned> > FunctionPersonalityFnWorklist;
GlobalInitWorklist.swap(GlobalInits);
AliasInitWorklist.swap(AliasInits);
FunctionPrefixWorklist.swap(FunctionPrefixes);
FunctionPrologueWorklist.swap(FunctionPrologues);
+ FunctionPersonalityFnWorklist.swap(FunctionPersonalityFns);
while (!GlobalInitWorklist.empty()) {
unsigned ValID = GlobalInitWorklist.back().second;
@@ -2055,7 +2049,7 @@ std::error_code BitcodeReader::ResolveGlobalAndAliasInits() {
if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
GlobalInitWorklist.back().first->setInitializer(C);
else
- return Error("Expected a constant");
+ return error("Expected a constant");
}
GlobalInitWorklist.pop_back();
}
@@ -2067,10 +2061,10 @@ std::error_code BitcodeReader::ResolveGlobalAndAliasInits() {
} else {
Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]);
if (!C)
- return Error("Expected a constant");
+ return error("Expected a constant");
GlobalAlias *Alias = AliasInitWorklist.back().first;
if (C->getType() != Alias->getType())
- return Error("Alias and aliasee types don't match");
+ return error("Alias and aliasee types don't match");
Alias->setAliasee(C);
}
AliasInitWorklist.pop_back();
@@ -2084,7 +2078,7 @@ std::error_code BitcodeReader::ResolveGlobalAndAliasInits() {
if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
FunctionPrefixWorklist.back().first->setPrefixData(C);
else
- return Error("Expected a constant");
+ return error("Expected a constant");
}
FunctionPrefixWorklist.pop_back();
}
@@ -2097,15 +2091,28 @@ std::error_code BitcodeReader::ResolveGlobalAndAliasInits() {
if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
FunctionPrologueWorklist.back().first->setPrologueData(C);
else
- return Error("Expected a constant");
+ return error("Expected a constant");
}
FunctionPrologueWorklist.pop_back();
}
+ while (!FunctionPersonalityFnWorklist.empty()) {
+ unsigned ValID = FunctionPersonalityFnWorklist.back().second;
+ if (ValID >= ValueList.size()) {
+ FunctionPersonalityFns.push_back(FunctionPersonalityFnWorklist.back());
+ } else {
+ if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+ FunctionPersonalityFnWorklist.back().first->setPersonalityFn(C);
+ else
+ return error("Expected a constant");
+ }
+ FunctionPersonalityFnWorklist.pop_back();
+ }
+
return std::error_code();
}
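
// Generic shape of the deferred-init worklists drained above (global
// initializers, alias targets, prefix/prologue data, and the newly added
// personality functions): (object, value-id) pairs are queued while parsing;
// once the value table can satisfy an id the field is written, otherwise the
// pair is re-queued for a later pass. Toy types only.
#include <cassert>
#include <utility>
#include <vector>

int main() {
  std::vector<int> ValueList = {100, 200}; // values resolved so far
  std::vector<std::pair<int *, unsigned>> Worklist;
  int A = 0, B = 0;
  Worklist.push_back({&A, 1}); // A wants value id 1
  Worklist.push_back({&B, 5}); // B wants value id 5 (not available yet)

  std::vector<std::pair<int *, unsigned>> Deferred;
  while (!Worklist.empty()) {
    auto Item = Worklist.back();
    Worklist.pop_back();
    if (Item.second < ValueList.size())
      *Item.first = ValueList[Item.second]; // resolve now
    else
      Deferred.push_back(Item);             // retry on a later pass
  }
  assert(A == 200 && B == 0 && Deferred.size() == 1);
  return 0;
}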
-static APInt ReadWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
+static APInt readWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
SmallVector<uint64_t, 8> Words(Vals.size());
std::transform(Vals.begin(), Vals.end(), Words.begin(),
BitcodeReader::decodeSignRotatedValue);
@@ -2113,9 +2120,9 @@ static APInt ReadWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
return APInt(TypeBits, Words);
}
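
// The two-step decode used by readWideAPInt above, shown on a 2-word value:
// sign-unrotate each 64-bit record word, then the word array becomes the
// APInt's bits, low word first. llvm::APInt itself is not used here.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

static uint64_t decodeSignRotated(uint64_t V) {
  if ((V & 1) == 0) return V >> 1;
  if (V != 1) return -(V >> 1);
  return 1ULL << 63;
}

int main() {
  std::vector<uint64_t> Vals = {2, 6}; // sign-rotated record words
  std::vector<uint64_t> Words(Vals.size());
  std::transform(Vals.begin(), Vals.end(), Words.begin(), decodeSignRotated);
  assert(Words[0] == 1 && Words[1] == 3);
  // APInt(TypeBits, Words) would now assemble the 128-bit value (3 << 64) | 1.
  return 0;
}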
-std::error_code BitcodeReader::ParseConstants() {
+std::error_code BitcodeReader::parseConstants() {
if (Stream.EnterSubBlock(bitc::CONSTANTS_BLOCK_ID))
- return Error("Invalid record");
+ return error("Invalid record");
SmallVector<uint64_t, 64> Record;
@@ -2128,14 +2135,14 @@ std::error_code BitcodeReader::ParseConstants() {
switch (Entry.Kind) {
case BitstreamEntry::SubBlock: // Handled for us already.
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
if (NextCstNo != ValueList.size())
- return Error("Invalid ronstant reference");
+ return error("Invalid ronstant reference");
// Once all the constants have been read, go through and resolve forward
// references.
- ValueList.ResolveConstantForwardRefs();
+ ValueList.resolveConstantForwardRefs();
return std::error_code();
case BitstreamEntry::Record:
// The interesting case.
@@ -2153,9 +2160,9 @@ std::error_code BitcodeReader::ParseConstants() {
break;
case bitc::CST_CODE_SETTYPE: // SETTYPE: [typeid]
if (Record.empty())
- return Error("Invalid record");
+ return error("Invalid record");
if (Record[0] >= TypeList.size() || !TypeList[Record[0]])
- return Error("Invalid record");
+ return error("Invalid record");
CurTy = TypeList[Record[0]];
continue; // Skip the ValueList manipulation.
case bitc::CST_CODE_NULL: // NULL
@@ -2163,22 +2170,22 @@ std::error_code BitcodeReader::ParseConstants() {
break;
case bitc::CST_CODE_INTEGER: // INTEGER: [intval]
if (!CurTy->isIntegerTy() || Record.empty())
- return Error("Invalid record");
+ return error("Invalid record");
V = ConstantInt::get(CurTy, decodeSignRotatedValue(Record[0]));
break;
case bitc::CST_CODE_WIDE_INTEGER: {// WIDE_INTEGER: [n x intval]
if (!CurTy->isIntegerTy() || Record.empty())
- return Error("Invalid record");
+ return error("Invalid record");
- APInt VInt = ReadWideAPInt(Record,
- cast<IntegerType>(CurTy)->getBitWidth());
+ APInt VInt =
+ readWideAPInt(Record, cast<IntegerType>(CurTy)->getBitWidth());
V = ConstantInt::get(Context, VInt);
break;
}
case bitc::CST_CODE_FLOAT: { // FLOAT: [fpval]
if (Record.empty())
- return Error("Invalid record");
+ return error("Invalid record");
if (CurTy->isHalfTy())
V = ConstantFP::get(Context, APFloat(APFloat::IEEEhalf,
APInt(16, (uint16_t)Record[0])));
@@ -2208,7 +2215,7 @@ std::error_code BitcodeReader::ParseConstants() {
case bitc::CST_CODE_AGGREGATE: {// AGGREGATE: [n x value number]
if (Record.empty())
- return Error("Invalid record");
+ return error("Invalid record");
unsigned Size = Record.size();
SmallVector<Constant*, 16> Elts;
@@ -2236,7 +2243,7 @@ std::error_code BitcodeReader::ParseConstants() {
case bitc::CST_CODE_STRING: // STRING: [values]
case bitc::CST_CODE_CSTRING: { // CSTRING: [values]
if (Record.empty())
- return Error("Invalid record");
+ return error("Invalid record");
SmallString<16> Elts(Record.begin(), Record.end());
V = ConstantDataArray::getString(Context, Elts,
@@ -2245,7 +2252,7 @@ std::error_code BitcodeReader::ParseConstants() {
}
case bitc::CST_CODE_DATA: {// DATA: [n x value]
if (Record.empty())
- return Error("Invalid record");
+ return error("Invalid record");
Type *EltTy = cast<SequentialType>(CurTy)->getElementType();
unsigned Size = Record.size();
@@ -2290,15 +2297,15 @@ std::error_code BitcodeReader::ParseConstants() {
else
V = ConstantDataArray::get(Context, Elts);
} else {
- return Error("Invalid type for value");
+ return error("Invalid type for value");
}
break;
}
case bitc::CST_CODE_CE_BINOP: { // CE_BINOP: [opcode, opval, opval]
if (Record.size() < 3)
- return Error("Invalid record");
- int Opc = GetDecodedBinaryOpcode(Record[0], CurTy);
+ return error("Invalid record");
+ int Opc = getDecodedBinaryOpcode(Record[0], CurTy);
if (Opc < 0) {
V = UndefValue::get(CurTy); // Unknown binop.
} else {
@@ -2328,14 +2335,14 @@ std::error_code BitcodeReader::ParseConstants() {
}
case bitc::CST_CODE_CE_CAST: { // CE_CAST: [opcode, opty, opval]
if (Record.size() < 3)
- return Error("Invalid record");
- int Opc = GetDecodedCastOpcode(Record[0]);
+ return error("Invalid record");
+ int Opc = getDecodedCastOpcode(Record[0]);
if (Opc < 0) {
V = UndefValue::get(CurTy); // Unknown cast.
} else {
Type *OpTy = getTypeByID(Record[1]);
if (!OpTy)
- return Error("Invalid record");
+ return error("Invalid record");
Constant *Op = ValueList.getConstantFwdRef(Record[2], OpTy);
V = UpgradeBitCastExpr(Opc, Op, CurTy);
if (!V) V = ConstantExpr::getCast(Opc, Op, CurTy);
@@ -2352,7 +2359,7 @@ std::error_code BitcodeReader::ParseConstants() {
while (OpNum != Record.size()) {
Type *ElTy = getTypeByID(Record[OpNum++]);
if (!ElTy)
- return Error("Invalid record");
+ return error("Invalid record");
Elts.push_back(ValueList.getConstantFwdRef(Record[OpNum++], ElTy));
}
@@ -2360,7 +2367,7 @@ std::error_code BitcodeReader::ParseConstants() {
PointeeType !=
cast<SequentialType>(Elts[0]->getType()->getScalarType())
->getElementType())
- return Error("Explicit gep operator type does not match pointee type "
+ return error("Explicit gep operator type does not match pointee type "
"of pointer operand");
ArrayRef<Constant *> Indices(Elts.begin() + 1, Elts.end());
@@ -2371,7 +2378,7 @@ std::error_code BitcodeReader::ParseConstants() {
}
case bitc::CST_CODE_CE_SELECT: { // CE_SELECT: [opval#, opval#, opval#]
if (Record.size() < 3)
- return Error("Invalid record");
+ return error("Invalid record");
Type *SelectorTy = Type::getInt1Ty(Context);
@@ -2390,22 +2397,22 @@ std::error_code BitcodeReader::ParseConstants() {
case bitc::CST_CODE_CE_EXTRACTELT
: { // CE_EXTRACTELT: [opty, opval, opty, opval]
if (Record.size() < 3)
- return Error("Invalid record");
+ return error("Invalid record");
VectorType *OpTy =
dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
if (!OpTy)
- return Error("Invalid record");
+ return error("Invalid record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
Constant *Op1 = nullptr;
if (Record.size() == 4) {
Type *IdxTy = getTypeByID(Record[2]);
if (!IdxTy)
- return Error("Invalid record");
+ return error("Invalid record");
Op1 = ValueList.getConstantFwdRef(Record[3], IdxTy);
} else // TODO: Remove with llvm 4.0
Op1 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context));
if (!Op1)
- return Error("Invalid record");
+ return error("Invalid record");
V = ConstantExpr::getExtractElement(Op0, Op1);
break;
}
@@ -2413,7 +2420,7 @@ std::error_code BitcodeReader::ParseConstants() {
: { // CE_INSERTELT: [opval, opval, opty, opval]
VectorType *OpTy = dyn_cast<VectorType>(CurTy);
if (Record.size() < 3 || !OpTy)
- return Error("Invalid record");
+ return error("Invalid record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[1],
OpTy->getElementType());
@@ -2421,19 +2428,19 @@ std::error_code BitcodeReader::ParseConstants() {
if (Record.size() == 4) {
Type *IdxTy = getTypeByID(Record[2]);
if (!IdxTy)
- return Error("Invalid record");
+ return error("Invalid record");
Op2 = ValueList.getConstantFwdRef(Record[3], IdxTy);
} else // TODO: Remove with llvm 4.0
Op2 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context));
if (!Op2)
- return Error("Invalid record");
+ return error("Invalid record");
V = ConstantExpr::getInsertElement(Op0, Op1, Op2);
break;
}
case bitc::CST_CODE_CE_SHUFFLEVEC: { // CE_SHUFFLEVEC: [opval, opval, opval]
VectorType *OpTy = dyn_cast<VectorType>(CurTy);
if (Record.size() < 3 || !OpTy)
- return Error("Invalid record");
+ return error("Invalid record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[1], OpTy);
Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
@@ -2447,7 +2454,7 @@ std::error_code BitcodeReader::ParseConstants() {
VectorType *OpTy =
dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
if (Record.size() < 4 || !RTy || !OpTy)
- return Error("Invalid record");
+ return error("Invalid record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
@@ -2458,10 +2465,10 @@ std::error_code BitcodeReader::ParseConstants() {
}
case bitc::CST_CODE_CE_CMP: { // CE_CMP: [opty, opval, opval, pred]
if (Record.size() < 4)
- return Error("Invalid record");
+ return error("Invalid record");
Type *OpTy = getTypeByID(Record[0]);
if (!OpTy)
- return Error("Invalid record");
+ return error("Invalid record");
Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
@@ -2475,16 +2482,16 @@ std::error_code BitcodeReader::ParseConstants() {
// FIXME: Remove with the 4.0 release.
case bitc::CST_CODE_INLINEASM_OLD: {
if (Record.size() < 2)
- return Error("Invalid record");
+ return error("Invalid record");
std::string AsmStr, ConstrStr;
bool HasSideEffects = Record[0] & 1;
bool IsAlignStack = Record[0] >> 1;
unsigned AsmStrSize = Record[1];
if (2+AsmStrSize >= Record.size())
- return Error("Invalid record");
+ return error("Invalid record");
unsigned ConstStrSize = Record[2+AsmStrSize];
if (3+AsmStrSize+ConstStrSize > Record.size())
- return Error("Invalid record");
+ return error("Invalid record");
for (unsigned i = 0; i != AsmStrSize; ++i)
AsmStr += (char)Record[2+i];
@@ -2499,17 +2506,17 @@ std::error_code BitcodeReader::ParseConstants() {
// inteldialect).
case bitc::CST_CODE_INLINEASM: {
if (Record.size() < 2)
- return Error("Invalid record");
+ return error("Invalid record");
std::string AsmStr, ConstrStr;
bool HasSideEffects = Record[0] & 1;
bool IsAlignStack = (Record[0] >> 1) & 1;
unsigned AsmDialect = Record[0] >> 2;
unsigned AsmStrSize = Record[1];
if (2+AsmStrSize >= Record.size())
- return Error("Invalid record");
+ return error("Invalid record");
unsigned ConstStrSize = Record[2+AsmStrSize];
if (3+AsmStrSize+ConstStrSize > Record.size())
- return Error("Invalid record");
+ return error("Invalid record");
for (unsigned i = 0; i != AsmStrSize; ++i)
AsmStr += (char)Record[2+i];
@@ -2523,14 +2530,14 @@ std::error_code BitcodeReader::ParseConstants() {
}
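// Record layout decoded by the two INLINEASM cases above, as a sketch read
// off the bounds checks: Record[0] packs the flags (bit 0 = sideeffect,
// bit 1 = alignstack, bits 2+ = asm dialect in the newer record),
// Record[1] is the asm-string length N, Record[2..N+1] holds the asm
// characters, Record[N+2] is the constraint-string length, and the
// constraint characters follow.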
case bitc::CST_CODE_BLOCKADDRESS:{
if (Record.size() < 3)
- return Error("Invalid record");
+ return error("Invalid record");
Type *FnTy = getTypeByID(Record[0]);
if (!FnTy)
- return Error("Invalid record");
+ return error("Invalid record");
Function *Fn =
dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy));
if (!Fn)
- return Error("Invalid record");
+ return error("Invalid record");
// Don't let Fn get dematerialized.
BlockAddressesTaken.insert(Fn);
@@ -2541,12 +2548,12 @@ std::error_code BitcodeReader::ParseConstants() {
unsigned BBID = Record[2];
if (!BBID)
// Invalid reference to entry block.
- return Error("Invalid ID");
+ return error("Invalid ID");
if (!Fn->empty()) {
Function::iterator BBI = Fn->begin(), BBE = Fn->end();
for (size_t I = 0, E = BBID; I != E; ++I) {
if (BBI == BBE)
- return Error("Invalid ID");
+ return error("Invalid ID");
++BBI;
}
BB = BBI;
@@ -2567,14 +2574,14 @@ std::error_code BitcodeReader::ParseConstants() {
}
}
- ValueList.AssignValue(V, NextCstNo);
+ ValueList.assignValue(V, NextCstNo);
++NextCstNo;
}
}
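// A worked example of the record stream this loop consumes (sketch; operand
// numbers are indices into the growing constant table):
//   CST_CODE_SETTYPE   [typeid of i32]        ; sets CurTy, emits no value
//   CST_CODE_INTEGER   [84]                   ; value #0 = i32 42 (sign-rotated)
//   CST_CODE_SETTYPE   [typeid of [2 x i32]]
//   CST_CODE_AGGREGATE [0, 0]                 ; value #1 = [2 x i32] [42, 42]
// Every record except SETTYPE is stored at NextCstNo and bumps the counter.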
-std::error_code BitcodeReader::ParseUseLists() {
+std::error_code BitcodeReader::parseUseLists() {
if (Stream.EnterSubBlock(bitc::USELIST_BLOCK_ID))
- return Error("Invalid record");
+ return error("Invalid record");
// Read all the records.
SmallVector<uint64_t, 64> Record;
@@ -2584,7 +2591,7 @@ std::error_code BitcodeReader::ParseUseLists() {
switch (Entry.Kind) {
case BitstreamEntry::SubBlock: // Handled for us already.
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
return std::error_code();
case BitstreamEntry::Record:
@@ -2605,7 +2612,7 @@ std::error_code BitcodeReader::ParseUseLists() {
unsigned RecordLength = Record.size();
if (RecordLength < 3)
// Records should have at least an ID and two indexes.
- return Error("Invalid record");
+ return error("Invalid record");
unsigned ID = Record.back();
Record.pop_back();
@@ -2645,7 +2652,7 @@ std::error_code BitcodeReader::rememberAndSkipMetadata() {
// Skip over the block for now.
if (Stream.SkipBlock())
- return Error("Invalid record");
+ return error("Invalid record");
return std::error_code();
}
@@ -2653,7 +2660,7 @@ std::error_code BitcodeReader::materializeMetadata() {
for (uint64_t BitPos : DeferredMetadataInfo) {
// Move the bit stream to the saved position.
Stream.JumpToBit(BitPos);
- if (std::error_code EC = ParseMetadata())
+ if (std::error_code EC = parseMetadata())
return EC;
}
DeferredMetadataInfo.clear();
@@ -2662,13 +2669,12 @@ std::error_code BitcodeReader::materializeMetadata() {
void BitcodeReader::setStripDebugInfo() { StripDebugInfo = true; }
-/// RememberAndSkipFunctionBody - When we see the block for a function body,
-/// remember where it is and then skip it. This lets us lazily deserialize the
-/// functions.
-std::error_code BitcodeReader::RememberAndSkipFunctionBody() {
+/// When we see the block for a function body, remember where it is and then
+/// skip it. This lets us lazily deserialize the functions.
+std::error_code BitcodeReader::rememberAndSkipFunctionBody() {
// Get the function we are talking about.
if (FunctionsWithBodies.empty())
- return Error("Insufficient function protos");
+ return error("Insufficient function protos");
Function *Fn = FunctionsWithBodies.back();
FunctionsWithBodies.pop_back();
@@ -2679,31 +2685,26 @@ std::error_code BitcodeReader::RememberAndSkipFunctionBody() {
// Skip over the function block for now.
if (Stream.SkipBlock())
- return Error("Invalid record");
+ return error("Invalid record");
return std::error_code();
}
-std::error_code BitcodeReader::GlobalCleanup() {
+std::error_code BitcodeReader::globalCleanup() {
// Patch the initializers for globals and aliases up.
- ResolveGlobalAndAliasInits();
+ resolveGlobalAndAliasInits();
if (!GlobalInits.empty() || !AliasInits.empty())
- return Error("Malformed global initializer set");
+ return error("Malformed global initializer set");
// Look for intrinsic functions which need to be upgraded at some point
- for (Module::iterator FI = TheModule->begin(), FE = TheModule->end();
- FI != FE; ++FI) {
+ for (Function &F : *TheModule) {
Function *NewFn;
- if (UpgradeIntrinsicFunction(FI, NewFn))
- UpgradedIntrinsics.push_back(std::make_pair(FI, NewFn));
+ if (UpgradeIntrinsicFunction(&F, NewFn))
+ UpgradedIntrinsics.push_back(std::make_pair(&F, NewFn));
}
// Look for global variables which need to be renamed.
- for (Module::global_iterator
- GI = TheModule->global_begin(), GE = TheModule->global_end();
- GI != GE;) {
- GlobalVariable *GV = GI++;
- UpgradeGlobalVariable(GV);
- }
+ for (GlobalVariable &GV : TheModule->globals())
+ UpgradeGlobalVariable(&GV);
// Force deallocation of memory for these vectors to favor clients that
// want lazy deserialization.
@@ -2712,12 +2713,12 @@ std::error_code BitcodeReader::GlobalCleanup() {
return std::error_code();
}
-std::error_code BitcodeReader::ParseModule(bool Resume,
+std::error_code BitcodeReader::parseModule(bool Resume,
bool ShouldLazyLoadMetadata) {
if (Resume)
Stream.JumpToBit(NextUnreadBit);
else if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
- return Error("Invalid record");
+ return error("Invalid record");
SmallVector<uint64_t, 64> Record;
std::vector<std::string> SectionTable;
@@ -2729,41 +2730,41 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
switch (Entry.Kind) {
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
- return GlobalCleanup();
+ return globalCleanup();
case BitstreamEntry::SubBlock:
switch (Entry.ID) {
default: // Skip unknown content.
if (Stream.SkipBlock())
- return Error("Invalid record");
+ return error("Invalid record");
break;
case bitc::BLOCKINFO_BLOCK_ID:
if (Stream.ReadBlockInfoBlock())
- return Error("Malformed block");
+ return error("Malformed block");
break;
case bitc::PARAMATTR_BLOCK_ID:
- if (std::error_code EC = ParseAttributeBlock())
+ if (std::error_code EC = parseAttributeBlock())
return EC;
break;
case bitc::PARAMATTR_GROUP_BLOCK_ID:
- if (std::error_code EC = ParseAttributeGroupBlock())
+ if (std::error_code EC = parseAttributeGroupBlock())
return EC;
break;
case bitc::TYPE_BLOCK_ID_NEW:
- if (std::error_code EC = ParseTypeTable())
+ if (std::error_code EC = parseTypeTable())
return EC;
break;
case bitc::VALUE_SYMTAB_BLOCK_ID:
- if (std::error_code EC = ParseValueSymbolTable())
+ if (std::error_code EC = parseValueSymbolTable())
return EC;
SeenValueSymbolTable = true;
break;
case bitc::CONSTANTS_BLOCK_ID:
- if (std::error_code EC = ParseConstants())
+ if (std::error_code EC = parseConstants())
return EC;
- if (std::error_code EC = ResolveGlobalAndAliasInits())
+ if (std::error_code EC = resolveGlobalAndAliasInits())
return EC;
break;
case bitc::METADATA_BLOCK_ID:
@@ -2773,7 +2774,7 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
break;
}
assert(DeferredMetadataInfo.empty() && "Unexpected deferred metadata");
- if (std::error_code EC = ParseMetadata())
+ if (std::error_code EC = parseMetadata())
return EC;
break;
case bitc::FUNCTION_BLOCK_ID:
@@ -2781,12 +2782,12 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
// FunctionsWithBodies list.
if (!SeenFirstFunctionBody) {
std::reverse(FunctionsWithBodies.begin(), FunctionsWithBodies.end());
- if (std::error_code EC = GlobalCleanup())
+ if (std::error_code EC = globalCleanup())
return EC;
SeenFirstFunctionBody = true;
}
- if (std::error_code EC = RememberAndSkipFunctionBody())
+ if (std::error_code EC = rememberAndSkipFunctionBody())
return EC;
// For streaming bitcode, suspend parsing when we reach the function
// bodies. Subsequent materialization calls will resume it when
@@ -2794,13 +2795,13 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
// the bitcode. If the bitcode file is old, the symbol table will be
// at the end instead and will not have been seen yet. In this case,
// just finish the parse now.
- if (LazyStreamer && SeenValueSymbolTable) {
+ if (IsStreamed && SeenValueSymbolTable) {
NextUnreadBit = Stream.GetCurrentBitNo();
return std::error_code();
}
break;
case bitc::USELIST_BLOCK_ID:
- if (std::error_code EC = ParseUseLists())
+ if (std::error_code EC = parseUseLists())
return EC;
break;
}
@@ -2817,12 +2818,12 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
default: break; // Default behavior, ignore unknown content.
case bitc::MODULE_CODE_VERSION: { // VERSION: [version#]
if (Record.size() < 1)
- return Error("Invalid record");
+ return error("Invalid record");
// Only version #0 and #1 are supported so far.
unsigned module_version = Record[0];
switch (module_version) {
default:
- return Error("Invalid value");
+ return error("Invalid value");
case 0:
UseRelativeIDs = false;
break;
@@ -2834,50 +2835,50 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
}
case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N]
std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid record");
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
TheModule->setTargetTriple(S);
break;
}
case bitc::MODULE_CODE_DATALAYOUT: { // DATALAYOUT: [strchr x N]
std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid record");
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
TheModule->setDataLayout(S);
break;
}
case bitc::MODULE_CODE_ASM: { // ASM: [strchr x N]
std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid record");
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
TheModule->setModuleInlineAsm(S);
break;
}
case bitc::MODULE_CODE_DEPLIB: { // DEPLIB: [strchr x N]
// FIXME: Remove in 4.0.
std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid record");
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
// Ignore value.
break;
}
case bitc::MODULE_CODE_SECTIONNAME: { // SECTIONNAME: [strchr x N]
std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid record");
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
SectionTable.push_back(S);
break;
}
case bitc::MODULE_CODE_GCNAME: { // GCNAME: [strchr x N]
std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid record");
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
GCTable.push_back(S);
break;
}
case bitc::MODULE_CODE_COMDAT: { // COMDAT: [selection_kind, name]
if (Record.size() < 2)
- return Error("Invalid record");
+ return error("Invalid record");
Comdat::SelectionKind SK = getDecodedComdatSelectionKind(Record[0]);
unsigned ComdatNameSize = Record[1];
std::string ComdatName;
@@ -2895,10 +2896,10 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
// comdat]
case bitc::MODULE_CODE_GLOBALVAR: {
if (Record.size() < 6)
- return Error("Invalid record");
+ return error("Invalid record");
Type *Ty = getTypeByID(Record[0]);
if (!Ty)
- return Error("Invalid record");
+ return error("Invalid record");
bool isConstant = Record[1] & 1;
bool explicitType = Record[1] & 2;
unsigned AddressSpace;
@@ -2906,7 +2907,7 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
AddressSpace = Record[1] >> 2;
} else {
if (!Ty->isPointerTy())
- return Error("Invalid type for value");
+ return error("Invalid type for value");
AddressSpace = cast<PointerType>(Ty)->getAddressSpace();
Ty = cast<PointerType>(Ty)->getElementType();
}
@@ -2919,18 +2920,18 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
std::string Section;
if (Record[5]) {
if (Record[5]-1 >= SectionTable.size())
- return Error("Invalid ID");
+ return error("Invalid ID");
Section = SectionTable[Record[5]-1];
}
GlobalValue::VisibilityTypes Visibility = GlobalValue::DefaultVisibility;
// Local linkage must have default visibility.
if (Record.size() > 6 && !GlobalValue::isLocalLinkage(Linkage))
// FIXME: Change to an error if non-default in 4.0.
- Visibility = GetDecodedVisibility(Record[6]);
+ Visibility = getDecodedVisibility(Record[6]);
GlobalVariable::ThreadLocalMode TLM = GlobalVariable::NotThreadLocal;
if (Record.size() > 7)
- TLM = GetDecodedThreadLocalMode(Record[7]);
+ TLM = getDecodedThreadLocalMode(Record[7]);
bool UnnamedAddr = false;
if (Record.size() > 8)
@@ -2950,9 +2951,9 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
NewGV->setUnnamedAddr(UnnamedAddr);
if (Record.size() > 10)
- NewGV->setDLLStorageClass(GetDecodedDLLStorageClass(Record[10]));
+ NewGV->setDLLStorageClass(getDecodedDLLStorageClass(Record[10]));
else
- UpgradeDLLImportExportLinkage(NewGV, RawLinkage);
+ upgradeDLLImportExportLinkage(NewGV, RawLinkage);
ValueList.push_back(NewGV);
@@ -2963,7 +2964,7 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
if (Record.size() > 11) {
if (unsigned ComdatID = Record[11]) {
if (ComdatID > ComdatList.size())
- return Error("Invalid global variable comdat ID");
+ return error("Invalid global variable comdat ID");
NewGV->setComdat(ComdatList[ComdatID - 1]);
}
} else if (hasImplicitComdat(RawLinkage)) {
@@ -2976,15 +2977,15 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
// prologuedata, dllstorageclass, comdat, prefixdata]
case bitc::MODULE_CODE_FUNCTION: {
if (Record.size() < 8)
- return Error("Invalid record");
+ return error("Invalid record");
Type *Ty = getTypeByID(Record[0]);
if (!Ty)
- return Error("Invalid record");
+ return error("Invalid record");
if (auto *PTy = dyn_cast<PointerType>(Ty))
Ty = PTy->getElementType();
auto *FTy = dyn_cast<FunctionType>(Ty);
if (!FTy)
- return Error("Invalid type for value");
+ return error("Invalid type for value");
Function *Func = Function::Create(FTy, GlobalValue::ExternalLinkage,
"", TheModule);
@@ -3001,16 +3002,16 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
Func->setAlignment(Alignment);
if (Record[6]) {
if (Record[6]-1 >= SectionTable.size())
- return Error("Invalid ID");
+ return error("Invalid ID");
Func->setSection(SectionTable[Record[6]-1]);
}
// Local linkage must have default visibility.
if (!Func->hasLocalLinkage())
// FIXME: Change to an error if non-default in 4.0.
- Func->setVisibility(GetDecodedVisibility(Record[7]));
+ Func->setVisibility(getDecodedVisibility(Record[7]));
if (Record.size() > 8 && Record[8]) {
if (Record[8]-1 >= GCTable.size())
- return Error("Invalid ID");
+ return error("Invalid ID");
Func->setGC(GCTable[Record[8]-1].c_str());
}
bool UnnamedAddr = false;
@@ -3021,14 +3022,14 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
FunctionPrologues.push_back(std::make_pair(Func, Record[10]-1));
if (Record.size() > 11)
- Func->setDLLStorageClass(GetDecodedDLLStorageClass(Record[11]));
+ Func->setDLLStorageClass(getDecodedDLLStorageClass(Record[11]));
else
- UpgradeDLLImportExportLinkage(Func, RawLinkage);
+ upgradeDLLImportExportLinkage(Func, RawLinkage);
if (Record.size() > 12) {
if (unsigned ComdatID = Record[12]) {
if (ComdatID > ComdatList.size())
- return Error("Invalid function comdat ID");
+ return error("Invalid function comdat ID");
Func->setComdat(ComdatList[ComdatID - 1]);
}
} else if (hasImplicitComdat(RawLinkage)) {
@@ -3038,6 +3039,9 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
if (Record.size() > 13 && Record[13] != 0)
FunctionPrefixes.push_back(std::make_pair(Func, Record[13]-1));
+ if (Record.size() > 14 && Record[14] != 0)
+ FunctionPersonalityFns.push_back(std::make_pair(Func, Record[14] - 1));
+
ValueList.push_back(Func);
// If this is a function with a body, remember the prototype we are
@@ -3045,7 +3049,7 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
if (!isProto) {
Func->setIsMaterializable(true);
FunctionsWithBodies.push_back(Func);
- if (LazyStreamer)
+ if (IsStreamed)
DeferredFunctionInfo[Func] = 0;
}
break;
@@ -3054,13 +3058,13 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
// ALIAS: [alias type, aliasee val#, linkage, visibility, dllstorageclass]
case bitc::MODULE_CODE_ALIAS: {
if (Record.size() < 3)
- return Error("Invalid record");
+ return error("Invalid record");
Type *Ty = getTypeByID(Record[0]);
if (!Ty)
- return Error("Invalid record");
+ return error("Invalid record");
auto *PTy = dyn_cast<PointerType>(Ty);
if (!PTy)
- return Error("Invalid type for value");
+ return error("Invalid type for value");
auto *NewGA =
GlobalAlias::create(PTy, getDecodedLinkage(Record[2]), "", TheModule);
@@ -3068,13 +3072,13 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
// Local linkage must have default visibility.
if (Record.size() > 3 && !NewGA->hasLocalLinkage())
// FIXME: Change to an error if non-default in 4.0.
- NewGA->setVisibility(GetDecodedVisibility(Record[3]));
+ NewGA->setVisibility(getDecodedVisibility(Record[3]));
if (Record.size() > 4)
- NewGA->setDLLStorageClass(GetDecodedDLLStorageClass(Record[4]));
+ NewGA->setDLLStorageClass(getDecodedDLLStorageClass(Record[4]));
else
- UpgradeDLLImportExportLinkage(NewGA, Record[2]);
+ upgradeDLLImportExportLinkage(NewGA, Record[2]);
if (Record.size() > 5)
- NewGA->setThreadLocalMode(GetDecodedThreadLocalMode(Record[5]));
+ NewGA->setThreadLocalMode(getDecodedThreadLocalMode(Record[5]));
if (Record.size() > 6)
NewGA->setUnnamedAddr(Record[6]);
ValueList.push_back(NewGA);
@@ -3085,7 +3089,7 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
case bitc::MODULE_CODE_PURGEVALS:
// Trim down the value list to the specified size.
if (Record.size() < 1 || Record[0] > ValueList.size())
- return Error("Invalid record");
+ return error("Invalid record");
ValueList.shrinkTo(Record[0]);
break;
}
@@ -3093,11 +3097,12 @@ std::error_code BitcodeReader::ParseModule(bool Resume,
}
}
-std::error_code BitcodeReader::ParseBitcodeInto(Module *M,
- bool ShouldLazyLoadMetadata) {
- TheModule = nullptr;
+std::error_code
+BitcodeReader::parseBitcodeInto(std::unique_ptr<DataStreamer> Streamer,
+ Module *M, bool ShouldLazyLoadMetadata) {
+ TheModule = M;
- if (std::error_code EC = InitStream())
+ if (std::error_code EC = initStream(std::move(Streamer)))
return EC;
// Sniff for the signature.
@@ -3107,68 +3112,33 @@ std::error_code BitcodeReader::ParseBitcodeInto(Module *M,
Stream.Read(4) != 0xC ||
Stream.Read(4) != 0xE ||
Stream.Read(4) != 0xD)
- return Error("Invalid bitcode signature");
+ return error("Invalid bitcode signature");
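// The magic being sniffed here is the 4-byte bitcode wrapper "BC\xC0\xDE":
// 'B' and 'C' as 8-bit reads, then the nibbles 0x0, 0xC, 0xE, 0xD.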
// We expect a number of well-defined blocks, though we don't necessarily
// need to understand them all.
while (1) {
if (Stream.AtEndOfStream()) {
- if (TheModule)
- return std::error_code();
// We didn't really read a proper Module.
- return Error("Malformed IR file");
+ return error("Malformed IR file");
}
BitstreamEntry Entry =
Stream.advance(BitstreamCursor::AF_DontAutoprocessAbbrevs);
- switch (Entry.Kind) {
- case BitstreamEntry::Error:
- return Error("Malformed block");
- case BitstreamEntry::EndBlock:
- return std::error_code();
+ if (Entry.Kind != BitstreamEntry::SubBlock)
+ return error("Malformed block");
- case BitstreamEntry::SubBlock:
- switch (Entry.ID) {
- case bitc::BLOCKINFO_BLOCK_ID:
- if (Stream.ReadBlockInfoBlock())
- return Error("Malformed block");
- break;
- case bitc::MODULE_BLOCK_ID:
- // Reject multiple MODULE_BLOCK's in a single bitstream.
- if (TheModule)
- return Error("Invalid multiple blocks");
- TheModule = M;
- if (std::error_code EC = ParseModule(false, ShouldLazyLoadMetadata))
- return EC;
- if (LazyStreamer)
- return std::error_code();
- break;
- default:
- if (Stream.SkipBlock())
- return Error("Invalid record");
- break;
- }
- continue;
- case BitstreamEntry::Record:
- // There should be no records in the top-level of blocks.
+ if (Entry.ID == bitc::MODULE_BLOCK_ID)
+ return parseModule(false, ShouldLazyLoadMetadata);
- // The ranlib in Xcode 4 will align archive members by appending newlines
- // to the end of them. If this file size is a multiple of 4 but not 8, we
- // have to read and ignore these final 4 bytes :-(
- if (Stream.getAbbrevIDWidth() == 2 && Entry.ID == 2 &&
- Stream.Read(6) == 2 && Stream.Read(24) == 0xa0a0a &&
- Stream.AtEndOfStream())
- return std::error_code();
-
- return Error("Invalid record");
- }
+ if (Stream.SkipBlock())
+ return error("Invalid record");
}
}
ErrorOr<std::string> BitcodeReader::parseModuleTriple() {
if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
- return Error("Invalid record");
+ return error("Invalid record");
SmallVector<uint64_t, 64> Record;
@@ -3180,7 +3150,7 @@ ErrorOr<std::string> BitcodeReader::parseModuleTriple() {
switch (Entry.Kind) {
case BitstreamEntry::SubBlock: // Handled for us already.
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
return Triple;
case BitstreamEntry::Record:
@@ -3193,8 +3163,8 @@ ErrorOr<std::string> BitcodeReader::parseModuleTriple() {
default: break; // Default behavior, ignore unknown content.
case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N]
std::string S;
- if (ConvertToString(Record, 0, S))
- return Error("Invalid record");
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
Triple = S;
break;
}
@@ -3205,7 +3175,7 @@ ErrorOr<std::string> BitcodeReader::parseModuleTriple() {
}
ErrorOr<std::string> BitcodeReader::parseTriple() {
- if (std::error_code EC = InitStream())
+ if (std::error_code EC = initStream(nullptr))
return EC;
// Sniff for the signature.
@@ -3215,7 +3185,7 @@ ErrorOr<std::string> BitcodeReader::parseTriple() {
Stream.Read(4) != 0xC ||
Stream.Read(4) != 0xE ||
Stream.Read(4) != 0xD)
- return Error("Invalid bitcode signature");
+ return error("Invalid bitcode signature");
// We expect a number of well-defined blocks, though we don't necessarily
// need to understand them all.
@@ -3224,7 +3194,7 @@ ErrorOr<std::string> BitcodeReader::parseTriple() {
switch (Entry.Kind) {
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
return std::error_code();
@@ -3234,7 +3204,7 @@ ErrorOr<std::string> BitcodeReader::parseTriple() {
// Ignore other sub-blocks.
if (Stream.SkipBlock())
- return Error("Malformed block");
+ return error("Malformed block");
continue;
case BitstreamEntry::Record:
@@ -3244,10 +3214,10 @@ ErrorOr<std::string> BitcodeReader::parseTriple() {
}
}
-/// ParseMetadataAttachment - Parse metadata attachments.
-std::error_code BitcodeReader::ParseMetadataAttachment(Function &F) {
+/// Parse metadata attachments.
+std::error_code BitcodeReader::parseMetadataAttachment(Function &F) {
if (Stream.EnterSubBlock(bitc::METADATA_ATTACHMENT_ID))
- return Error("Invalid record");
+ return error("Invalid record");
SmallVector<uint64_t, 64> Record;
while (1) {
@@ -3256,7 +3226,7 @@ std::error_code BitcodeReader::ParseMetadataAttachment(Function &F) {
switch (Entry.Kind) {
case BitstreamEntry::SubBlock: // Handled for us already.
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
return std::error_code();
case BitstreamEntry::Record:
@@ -3272,13 +3242,13 @@ std::error_code BitcodeReader::ParseMetadataAttachment(Function &F) {
case bitc::METADATA_ATTACHMENT: {
unsigned RecordLength = Record.size();
if (Record.empty())
- return Error("Invalid record");
+ return error("Invalid record");
if (RecordLength % 2 == 0) {
// A function attachment.
for (unsigned I = 0; I != RecordLength; I += 2) {
auto K = MDKindMap.find(Record[I]);
if (K == MDKindMap.end())
- return Error("Invalid ID");
+ return error("Invalid ID");
Metadata *MD = MDValueList.getValueFwdRef(Record[I + 1]);
F.setMetadata(K->second, cast<MDNode>(MD));
}
@@ -3292,7 +3262,7 @@ std::error_code BitcodeReader::ParseMetadataAttachment(Function &F) {
DenseMap<unsigned, unsigned>::iterator I =
MDKindMap.find(Kind);
if (I == MDKindMap.end())
- return Error("Invalid ID");
+ return error("Invalid ID");
Metadata *Node = MDValueList.getValueFwdRef(Record[i + 1]);
if (isa<LocalAsMetadata>(Node))
// Drop the attachment. This used to be legal, but there's no
@@ -3308,24 +3278,24 @@ std::error_code BitcodeReader::ParseMetadataAttachment(Function &F) {
}
}
-static std::error_code TypeCheckLoadStoreInst(DiagnosticHandlerFunction DH,
+static std::error_code typeCheckLoadStoreInst(DiagnosticHandlerFunction DH,
Type *ValType, Type *PtrType) {
if (!isa<PointerType>(PtrType))
- return Error(DH, "Load/Store operand is not a pointer type");
+ return error(DH, "Load/Store operand is not a pointer type");
Type *ElemType = cast<PointerType>(PtrType)->getElementType();
if (ValType && ValType != ElemType)
- return Error(DH, "Explicit load/store type does not match pointee type of "
+ return error(DH, "Explicit load/store type does not match pointee type of "
"pointer operand");
if (!PointerType::isLoadableOrStorableType(ElemType))
- return Error(DH, "Cannot load/store from pointer");
+ return error(DH, "Cannot load/store from pointer");
return std::error_code();
}
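// What the check above accepts and rejects (sketch):
//   ValType = i32,  PtrType = i32*    -> ok
//   ValType = i64,  PtrType = i32*    -> "Explicit load/store type..." error
//   ValType = i32,  PtrType = i32     -> "not a pointer type" error
//   ValType = null, PtrType = void()* -> "Cannot load/store from pointer"
//                                        (function pointee is not loadable)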
-/// ParseFunctionBody - Lazily parse the specified function body block.
-std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
+/// Lazily parse the specified function body block.
+std::error_code BitcodeReader::parseFunctionBody(Function *F) {
if (Stream.EnterSubBlock(bitc::FUNCTION_BLOCK_ID))
- return Error("Invalid record");
+ return error("Invalid record");
InstructionList.clear();
unsigned ModuleValueListSize = ValueList.size();
@@ -3356,7 +3326,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
switch (Entry.Kind) {
case BitstreamEntry::Error:
- return Error("Malformed block");
+ return error("Malformed block");
case BitstreamEntry::EndBlock:
goto OutOfRecordLoop;
@@ -3364,27 +3334,27 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
switch (Entry.ID) {
default: // Skip unknown content.
if (Stream.SkipBlock())
- return Error("Invalid record");
+ return error("Invalid record");
break;
case bitc::CONSTANTS_BLOCK_ID:
- if (std::error_code EC = ParseConstants())
+ if (std::error_code EC = parseConstants())
return EC;
NextValueNo = ValueList.size();
break;
case bitc::VALUE_SYMTAB_BLOCK_ID:
- if (std::error_code EC = ParseValueSymbolTable())
+ if (std::error_code EC = parseValueSymbolTable())
return EC;
break;
case bitc::METADATA_ATTACHMENT_ID:
- if (std::error_code EC = ParseMetadataAttachment(*F))
+ if (std::error_code EC = parseMetadataAttachment(*F))
return EC;
break;
case bitc::METADATA_BLOCK_ID:
- if (std::error_code EC = ParseMetadata())
+ if (std::error_code EC = parseMetadata())
return EC;
break;
case bitc::USELIST_BLOCK_ID:
- if (std::error_code EC = ParseUseLists())
+ if (std::error_code EC = parseUseLists())
return EC;
break;
}
@@ -3401,10 +3371,10 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
unsigned BitCode = Stream.readRecord(Entry.ID, Record);
switch (BitCode) {
default: // Default behavior: reject
- return Error("Invalid value");
+ return error("Invalid value");
case bitc::FUNC_CODE_DECLAREBLOCKS: { // DECLAREBLOCKS: [nblocks]
if (Record.size() < 1 || Record[0] == 0)
- return Error("Invalid record");
+ return error("Invalid record");
// Create all the basic blocks for the function.
FunctionBBs.resize(Record[0]);
@@ -3417,7 +3387,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
auto &BBRefs = BBFRI->second;
// Check for invalid basic block references.
if (BBRefs.size() > FunctionBBs.size())
- return Error("Invalid ID");
+ return error("Invalid ID");
assert(!BBRefs.empty() && "Unexpected empty array");
assert(!BBRefs.front() && "Invalid reference to entry block");
for (unsigned I = 0, E = FunctionBBs.size(), RE = BBRefs.size(); I != E;
@@ -3443,7 +3413,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
I = getLastInstruction();
if (!I)
- return Error("Invalid record");
+ return error("Invalid record");
I->setDebugLoc(LastLoc);
I = nullptr;
continue;
@@ -3451,7 +3421,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_DEBUG_LOC: { // DEBUG_LOC: [line, col, scope, ia]
I = getLastInstruction();
if (!I || Record.size() < 4)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned Line = Record[0], Col = Record[1];
unsigned ScopeID = Record[2], IAID = Record[3];
@@ -3471,11 +3441,11 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) ||
OpNum+1 > Record.size())
- return Error("Invalid record");
+ return error("Invalid record");
- int Opc = GetDecodedBinaryOpcode(Record[OpNum++], LHS->getType());
+ int Opc = getDecodedBinaryOpcode(Record[OpNum++], LHS->getType());
if (Opc == -1)
- return Error("Invalid record");
+ return error("Invalid record");
I = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
InstructionList.push_back(I);
if (OpNum < Record.size()) {
@@ -3517,12 +3487,12 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
Value *Op;
if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
OpNum+2 != Record.size())
- return Error("Invalid record");
+ return error("Invalid record");
Type *ResTy = getTypeByID(Record[OpNum]);
- int Opc = GetDecodedCastOpcode(Record[OpNum+1]);
+ int Opc = getDecodedCastOpcode(Record[OpNum + 1]);
if (Opc == -1 || !ResTy)
- return Error("Invalid record");
+ return error("Invalid record");
Instruction *Temp = nullptr;
if ((I = UpgradeBitCastInst(Opc, Op, ResTy, Temp))) {
if (Temp) {
@@ -3553,7 +3523,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
Value *BasePtr;
if (getValueTypePair(Record, OpNum, NextValueNo, BasePtr))
- return Error("Invalid record");
+ return error("Invalid record");
if (!Ty)
Ty = cast<SequentialType>(BasePtr->getType()->getScalarType())
@@ -3561,14 +3531,14 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
else if (Ty !=
cast<SequentialType>(BasePtr->getType()->getScalarType())
->getElementType())
- return Error(
+ return error(
"Explicit gep type does not match pointee type of pointer operand");
SmallVector<Value*, 16> GEPIdx;
while (OpNum != Record.size()) {
Value *Op;
if (getValueTypePair(Record, OpNum, NextValueNo, Op))
- return Error("Invalid record");
+ return error("Invalid record");
GEPIdx.push_back(Op);
}
@@ -3585,11 +3555,11 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Agg;
if (getValueTypePair(Record, OpNum, NextValueNo, Agg))
- return Error("Invalid record");
+ return error("Invalid record");
unsigned RecSize = Record.size();
if (OpNum == RecSize)
- return Error("EXTRACTVAL: Invalid instruction with 0 indices");
+ return error("EXTRACTVAL: Invalid instruction with 0 indices");
SmallVector<unsigned, 4> EXTRACTVALIdx;
Type *CurTy = Agg->getType();
@@ -3599,13 +3569,13 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
uint64_t Index = Record[OpNum];
if (!IsStruct && !IsArray)
- return Error("EXTRACTVAL: Invalid type");
+ return error("EXTRACTVAL: Invalid type");
if ((unsigned)Index != Index)
- return Error("Invalid value");
+ return error("Invalid value");
if (IsStruct && Index >= CurTy->subtypes().size())
- return Error("EXTRACTVAL: Invalid struct index");
+ return error("EXTRACTVAL: Invalid struct index");
if (IsArray && Index >= CurTy->getArrayNumElements())
- return Error("EXTRACTVAL: Invalid array index");
+ return error("EXTRACTVAL: Invalid array index");
EXTRACTVALIdx.push_back((unsigned)Index);
if (IsStruct)
@@ -3624,14 +3594,14 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Agg;
if (getValueTypePair(Record, OpNum, NextValueNo, Agg))
- return Error("Invalid record");
+ return error("Invalid record");
Value *Val;
if (getValueTypePair(Record, OpNum, NextValueNo, Val))
- return Error("Invalid record");
+ return error("Invalid record");
unsigned RecSize = Record.size();
if (OpNum == RecSize)
- return Error("INSERTVAL: Invalid instruction with 0 indices");
+ return error("INSERTVAL: Invalid instruction with 0 indices");
SmallVector<unsigned, 4> INSERTVALIdx;
Type *CurTy = Agg->getType();
@@ -3641,13 +3611,13 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
uint64_t Index = Record[OpNum];
if (!IsStruct && !IsArray)
- return Error("INSERTVAL: Invalid type");
+ return error("INSERTVAL: Invalid type");
if ((unsigned)Index != Index)
- return Error("Invalid value");
+ return error("Invalid value");
if (IsStruct && Index >= CurTy->subtypes().size())
- return Error("INSERTVAL: Invalid struct index");
+ return error("INSERTVAL: Invalid struct index");
if (IsArray && Index >= CurTy->getArrayNumElements())
- return Error("INSERTVAL: Invalid array index");
+ return error("INSERTVAL: Invalid array index");
INSERTVALIdx.push_back((unsigned)Index);
if (IsStruct)
@@ -3657,7 +3627,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
}
if (CurTy != Val->getType())
- return Error("Inserted value type doesn't match aggregate type");
+ return error("Inserted value type doesn't match aggregate type");
I = InsertValueInst::Create(Agg, Val, INSERTVALIdx);
InstructionList.push_back(I);
@@ -3672,7 +3642,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) ||
popValue(Record, OpNum, NextValueNo, Type::getInt1Ty(Context), Cond))
- return Error("Invalid record");
+ return error("Invalid record");
I = SelectInst::Create(Cond, TrueVal, FalseVal);
InstructionList.push_back(I);
@@ -3687,18 +3657,18 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) ||
getValueTypePair(Record, OpNum, NextValueNo, Cond))
- return Error("Invalid record");
+ return error("Invalid record");
// select condition can be either i1 or <N x i1>
if (VectorType* vector_type =
dyn_cast<VectorType>(Cond->getType())) {
// expect <n x i1>
if (vector_type->getElementType() != Type::getInt1Ty(Context))
- return Error("Invalid type for value");
+ return error("Invalid type for value");
} else {
// expect i1
if (Cond->getType() != Type::getInt1Ty(Context))
- return Error("Invalid type for value");
+ return error("Invalid type for value");
}
I = SelectInst::Create(Cond, TrueVal, FalseVal);
@@ -3711,9 +3681,9 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
Value *Vec, *Idx;
if (getValueTypePair(Record, OpNum, NextValueNo, Vec) ||
getValueTypePair(Record, OpNum, NextValueNo, Idx))
- return Error("Invalid record");
+ return error("Invalid record");
if (!Vec->getType()->isVectorTy())
- return Error("Invalid type for value");
+ return error("Invalid type for value");
I = ExtractElementInst::Create(Vec, Idx);
InstructionList.push_back(I);
break;
@@ -3723,13 +3693,13 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Vec, *Elt, *Idx;
if (getValueTypePair(Record, OpNum, NextValueNo, Vec))
- return Error("Invalid record");
+ return error("Invalid record");
if (!Vec->getType()->isVectorTy())
- return Error("Invalid type for value");
+ return error("Invalid type for value");
if (popValue(Record, OpNum, NextValueNo,
cast<VectorType>(Vec->getType())->getElementType(), Elt) ||
getValueTypePair(Record, OpNum, NextValueNo, Idx))
- return Error("Invalid record");
+ return error("Invalid record");
I = InsertElementInst::Create(Vec, Elt, Idx);
InstructionList.push_back(I);
break;
@@ -3740,12 +3710,12 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
Value *Vec1, *Vec2, *Mask;
if (getValueTypePair(Record, OpNum, NextValueNo, Vec1) ||
popValue(Record, OpNum, NextValueNo, Vec1->getType(), Vec2))
- return Error("Invalid record");
+ return error("Invalid record");
if (getValueTypePair(Record, OpNum, NextValueNo, Mask))
- return Error("Invalid record");
+ return error("Invalid record");
if (!Vec1->getType()->isVectorTy() || !Vec2->getType()->isVectorTy())
- return Error("Invalid type for value");
+ return error("Invalid type for value");
I = new ShuffleVectorInst(Vec1, Vec2, Mask);
InstructionList.push_back(I);
break;
@@ -3763,7 +3733,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) ||
OpNum+1 != Record.size())
- return Error("Invalid record");
+ return error("Invalid record");
if (LHS->getType()->isFPOrFPVectorTy())
I = new FCmpInst((FCmpInst::Predicate)Record[OpNum], LHS, RHS);
@@ -3785,9 +3755,9 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
unsigned OpNum = 0;
Value *Op = nullptr;
if (getValueTypePair(Record, OpNum, NextValueNo, Op))
- return Error("Invalid record");
+ return error("Invalid record");
if (OpNum != Record.size())
- return Error("Invalid record");
+ return error("Invalid record");
I = ReturnInst::Create(Context, Op);
InstructionList.push_back(I);
@@ -3795,10 +3765,10 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
}
case bitc::FUNC_CODE_INST_BR: { // BR: [bb#, bb#, opval] or [bb#]
if (Record.size() != 1 && Record.size() != 3)
- return Error("Invalid record");
+ return error("Invalid record");
BasicBlock *TrueDest = getBasicBlock(Record[0]);
if (!TrueDest)
- return Error("Invalid record");
+ return error("Invalid record");
if (Record.size() == 1) {
I = BranchInst::Create(TrueDest);
@@ -3809,7 +3779,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
Value *Cond = getValue(Record, 2, NextValueNo,
Type::getInt1Ty(Context));
if (!FalseDest || !Cond)
- return Error("Invalid record");
+ return error("Invalid record");
I = BranchInst::Create(TrueDest, FalseDest, Cond);
InstructionList.push_back(I);
}
@@ -3829,7 +3799,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
Value *Cond = getValue(Record, 2, NextValueNo, OpTy);
BasicBlock *Default = getBasicBlock(Record[3]);
if (!OpTy || !Cond || !Default)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned NumCases = Record[4];
@@ -3847,7 +3817,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
unsigned ActiveWords = 1;
if (ValueBitWidth > 64)
ActiveWords = Record[CurIdx++];
- Low = ReadWideAPInt(makeArrayRef(&Record[CurIdx], ActiveWords),
+ Low = readWideAPInt(makeArrayRef(&Record[CurIdx], ActiveWords),
ValueBitWidth);
CurIdx += ActiveWords;
@@ -3855,9 +3825,8 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
ActiveWords = 1;
if (ValueBitWidth > 64)
ActiveWords = Record[CurIdx++];
- APInt High =
- ReadWideAPInt(makeArrayRef(&Record[CurIdx], ActiveWords),
- ValueBitWidth);
+ APInt High = readWideAPInt(
+ makeArrayRef(&Record[CurIdx], ActiveWords), ValueBitWidth);
CurIdx += ActiveWords;
// FIXME: It is not clear whether values in the range should be
@@ -3881,12 +3850,12 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
// Old SwitchInst format without case ranges.
if (Record.size() < 3 || (Record.size() & 1) == 0)
- return Error("Invalid record");
+ return error("Invalid record");
Type *OpTy = getTypeByID(Record[0]);
Value *Cond = getValue(Record, 1, NextValueNo, OpTy);
BasicBlock *Default = getBasicBlock(Record[2]);
if (!OpTy || !Cond || !Default)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned NumCases = (Record.size()-3)/2;
SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases);
InstructionList.push_back(SI);
@@ -3896,7 +3865,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
BasicBlock *DestBB = getBasicBlock(Record[1+3+i*2]);
if (!CaseVal || !DestBB) {
delete SI;
- return Error("Invalid record");
+ return error("Invalid record");
}
SI->addCase(CaseVal, DestBB);
}
@@ -3905,11 +3874,11 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
}
case bitc::FUNC_CODE_INST_INDIRECTBR: { // INDIRECTBR: [opty, op0, op1, ...]
if (Record.size() < 2)
- return Error("Invalid record");
+ return error("Invalid record");
Type *OpTy = getTypeByID(Record[0]);
Value *Address = getValue(Record, 1, NextValueNo, OpTy);
if (!OpTy || !Address)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned NumDests = Record.size()-2;
IndirectBrInst *IBI = IndirectBrInst::Create(Address, NumDests);
InstructionList.push_back(IBI);
@@ -3918,7 +3887,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
IBI->addDestination(DestBB);
} else {
delete IBI;
- return Error("Invalid record");
+ return error("Invalid record");
}
}
I = IBI;
@@ -3928,7 +3897,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_INVOKE: {
// INVOKE: [attrs, cc, normBB, unwindBB, fnty, op0,op1,op2, ...]
if (Record.size() < 4)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned OpNum = 0;
AttributeSet PAL = getAttributes(Record[OpNum++]);
unsigned CCInfo = Record[OpNum++];
@@ -3938,42 +3907,42 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
FunctionType *FTy = nullptr;
if (CCInfo >> 13 & 1 &&
!(FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++]))))
- return Error("Explicit invoke type is not a function type");
+ return error("Explicit invoke type is not a function type");
Value *Callee;
if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
- return Error("Invalid record");
+ return error("Invalid record");
PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType());
if (!CalleeTy)
- return Error("Callee is not a pointer");
+ return error("Callee is not a pointer");
if (!FTy) {
FTy = dyn_cast<FunctionType>(CalleeTy->getElementType());
if (!FTy)
- return Error("Callee is not of pointer to function type");
+ return error("Callee is not of pointer to function type");
} else if (CalleeTy->getElementType() != FTy)
- return Error("Explicit invoke type does not match pointee type of "
+ return error("Explicit invoke type does not match pointee type of "
"callee operand");
if (Record.size() < FTy->getNumParams() + OpNum)
- return Error("Insufficient operands to call");
+ return error("Insufficient operands to call");
SmallVector<Value*, 16> Ops;
for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
Ops.push_back(getValue(Record, OpNum, NextValueNo,
FTy->getParamType(i)));
if (!Ops.back())
- return Error("Invalid record");
+ return error("Invalid record");
}
if (!FTy->isVarArg()) {
if (Record.size() != OpNum)
- return Error("Invalid record");
+ return error("Invalid record");
} else {
// Read type/value pairs for varargs params.
while (OpNum != Record.size()) {
Value *Op;
if (getValueTypePair(Record, OpNum, NextValueNo, Op))
- return Error("Invalid record");
+ return error("Invalid record");
Ops.push_back(Op);
}
}
@@ -3989,7 +3958,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
unsigned Idx = 0;
Value *Val = nullptr;
if (getValueTypePair(Record, Idx, NextValueNo, Val))
- return Error("Invalid record");
+ return error("Invalid record");
I = ResumeInst::Create(Val);
InstructionList.push_back(I);
break;
@@ -4000,10 +3969,10 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
break;
case bitc::FUNC_CODE_INST_PHI: { // PHI: [ty, val0,bb0, ...]
if (Record.size() < 1 || ((Record.size()-1)&1))
- return Error("Invalid record");
+ return error("Invalid record");
Type *Ty = getTypeByID(Record[0]);
if (!Ty)
- return Error("Invalid record");
+ return error("Invalid record");
PHINode *PN = PHINode::Create(Ty, (Record.size()-1)/2);
InstructionList.push_back(PN);
@@ -4019,28 +3988,42 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
V = getValue(Record, 1+i, NextValueNo, Ty);
BasicBlock *BB = getBasicBlock(Record[2+i]);
if (!V || !BB)
- return Error("Invalid record");
+ return error("Invalid record");
PN->addIncoming(V, BB);
}
I = PN;
break;
}
- case bitc::FUNC_CODE_INST_LANDINGPAD: {
+ case bitc::FUNC_CODE_INST_LANDINGPAD:
+ case bitc::FUNC_CODE_INST_LANDINGPAD_OLD: {
// LANDINGPAD: [ty, val, val, num, (id0,val0 ...)?]
unsigned Idx = 0;
- if (Record.size() < 4)
- return Error("Invalid record");
+ if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD) {
+ if (Record.size() < 3)
+ return error("Invalid record");
+ } else {
+ assert(BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD);
+ if (Record.size() < 4)
+ return error("Invalid record");
+ }
Type *Ty = getTypeByID(Record[Idx++]);
if (!Ty)
- return Error("Invalid record");
- Value *PersFn = nullptr;
- if (getValueTypePair(Record, Idx, NextValueNo, PersFn))
- return Error("Invalid record");
+ return error("Invalid record");
+ if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD) {
+ Value *PersFn = nullptr;
+ if (getValueTypePair(Record, Idx, NextValueNo, PersFn))
+ return error("Invalid record");
+
+ if (!F->hasPersonalityFn())
+ F->setPersonalityFn(cast<Constant>(PersFn));
+ else if (F->getPersonalityFn() != cast<Constant>(PersFn))
+ return error("Personality function mismatch");
+ }
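// Sketch of the IR change the two paths above track: the personality moved
// from each landingpad onto its enclosing function, roughly
//   old:  %lp = landingpad { i8*, i32 } personality i8* @pers cleanup
//   new:  define void @f() personality i8* @pers { ... landingpad ... }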
bool IsCleanup = !!Record[Idx++];
unsigned NumClauses = Record[Idx++];
- LandingPadInst *LP = LandingPadInst::Create(Ty, PersFn, NumClauses);
+ LandingPadInst *LP = LandingPadInst::Create(Ty, NumClauses);
LP->setCleanup(IsCleanup);
for (unsigned J = 0; J != NumClauses; ++J) {
LandingPadInst::ClauseType CT =
@@ -4049,7 +4032,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
if (getValueTypePair(Record, Idx, NextValueNo, Val)) {
delete LP;
- return Error("Invalid record");
+ return error("Invalid record");
}
assert((CT != LandingPadInst::Catch ||
@@ -4068,7 +4051,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, opty, op, align]
if (Record.size() != 4)
- return Error("Invalid record");
+ return error("Invalid record");
uint64_t AlignRecord = Record[3];
const uint64_t InAllocaMask = uint64_t(1) << 5;
const uint64_t ExplicitTypeMask = uint64_t(1) << 6;
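// Alignment word layout for ALLOCA (sketch): the low bits carry the encoded
// alignment as log2(align) + 1 (0 means unspecified), bit 5 marks an
// inalloca allocation, and bit 6 says Record[0] already names the allocated
// type itself rather than a pointer to it (older bitcode stored the pointer
// type).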
@@ -4078,7 +4061,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
if ((AlignRecord & ExplicitTypeMask) == 0) {
auto *PTy = dyn_cast_or_null<PointerType>(Ty);
if (!PTy)
- return Error("Old-style alloca with a non-pointer type");
+ return error("Old-style alloca with a non-pointer type");
Ty = PTy->getElementType();
}
Type *OpTy = getTypeByID(Record[1]);
@@ -4089,7 +4072,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
return EC;
}
if (!Ty || !Size)
- return Error("Invalid record");
+ return error("Invalid record");
AllocaInst *AI = new AllocaInst(Ty, Size, Align);
AI->setUsedWithInAlloca(InAlloca);
I = AI;
@@ -4101,13 +4084,13 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
Value *Op;
if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
(OpNum + 2 != Record.size() && OpNum + 3 != Record.size()))
- return Error("Invalid record");
+ return error("Invalid record");
Type *Ty = nullptr;
if (OpNum + 3 == Record.size())
Ty = getTypeByID(Record[OpNum++]);
if (std::error_code EC =
- TypeCheckLoadStoreInst(DiagnosticHandler, Ty, Op->getType()))
+ typeCheckLoadStoreInst(DiagnosticHandler, Ty, Op->getType()))
return EC;
if (!Ty)
Ty = cast<PointerType>(Op->getType())->getElementType();
@@ -4126,24 +4109,24 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
Value *Op;
if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
(OpNum + 4 != Record.size() && OpNum + 5 != Record.size()))
- return Error("Invalid record");
+ return error("Invalid record");
Type *Ty = nullptr;
if (OpNum + 5 == Record.size())
Ty = getTypeByID(Record[OpNum++]);
if (std::error_code EC =
- TypeCheckLoadStoreInst(DiagnosticHandler, Ty, Op->getType()))
+ typeCheckLoadStoreInst(DiagnosticHandler, Ty, Op->getType()))
return EC;
if (!Ty)
Ty = cast<PointerType>(Op->getType())->getElementType();
- AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
+ AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
if (Ordering == NotAtomic || Ordering == Release ||
Ordering == AcquireRelease)
- return Error("Invalid record");
+ return error("Invalid record");
if (Ordering != NotAtomic && Record[OpNum] == 0)
- return Error("Invalid record");
- SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
+ return error("Invalid record");
+ SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
unsigned Align;
if (std::error_code EC = parseAlignmentValue(Record[OpNum], Align))
@@ -4164,9 +4147,9 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
cast<PointerType>(Ptr->getType())->getElementType(),
Val)) ||
OpNum + 2 != Record.size())
- return Error("Invalid record");
+ return error("Invalid record");
- if (std::error_code EC = TypeCheckLoadStoreInst(
+ if (std::error_code EC = typeCheckLoadStoreInst(
DiagnosticHandler, Val->getType(), Ptr->getType()))
return EC;
unsigned Align;
@@ -4188,18 +4171,18 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
cast<PointerType>(Ptr->getType())->getElementType(),
Val)) ||
OpNum + 4 != Record.size())
- return Error("Invalid record");
+ return error("Invalid record");
- if (std::error_code EC = TypeCheckLoadStoreInst(
+ if (std::error_code EC = typeCheckLoadStoreInst(
DiagnosticHandler, Val->getType(), Ptr->getType()))
return EC;
- AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
+ AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
if (Ordering == NotAtomic || Ordering == Acquire ||
Ordering == AcquireRelease)
- return Error("Invalid record");
- SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
+ return error("Invalid record");
+ SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
if (Ordering != NotAtomic && Record[OpNum] == 0)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned Align;
if (std::error_code EC = parseAlignmentValue(Record[OpNum], Align))
@@ -4222,13 +4205,13 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
Cmp)) ||
popValue(Record, OpNum, NextValueNo, Cmp->getType(), New) ||
Record.size() < OpNum + 3 || Record.size() > OpNum + 5)
- return Error("Invalid record");
- AtomicOrdering SuccessOrdering = GetDecodedOrdering(Record[OpNum+1]);
+ return error("Invalid record");
+ AtomicOrdering SuccessOrdering = getDecodedOrdering(Record[OpNum + 1]);
if (SuccessOrdering == NotAtomic || SuccessOrdering == Unordered)
- return Error("Invalid record");
- SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+2]);
+ return error("Invalid record");
+ SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 2]);
- if (std::error_code EC = TypeCheckLoadStoreInst(
+ if (std::error_code EC = typeCheckLoadStoreInst(
DiagnosticHandler, Cmp->getType(), Ptr->getType()))
return EC;
AtomicOrdering FailureOrdering;
@@ -4236,7 +4219,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
FailureOrdering =
AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrdering);
else
- FailureOrdering = GetDecodedOrdering(Record[OpNum+3]);
+ FailureOrdering = getDecodedOrdering(Record[OpNum + 3]);
I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering,
SynchScope);
@@ -4263,15 +4246,15 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
popValue(Record, OpNum, NextValueNo,
cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
OpNum+4 != Record.size())
- return Error("Invalid record");
- AtomicRMWInst::BinOp Operation = GetDecodedRMWOperation(Record[OpNum]);
+ return error("Invalid record");
+ AtomicRMWInst::BinOp Operation = getDecodedRMWOperation(Record[OpNum]);
if (Operation < AtomicRMWInst::FIRST_BINOP ||
Operation > AtomicRMWInst::LAST_BINOP)
- return Error("Invalid record");
- AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
+ return error("Invalid record");
+ AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
if (Ordering == NotAtomic || Ordering == Unordered)
- return Error("Invalid record");
- SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
+ return error("Invalid record");
+ SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
cast<AtomicRMWInst>(I)->setVolatile(Record[OpNum+1]);
InstructionList.push_back(I);
@@ -4279,12 +4262,12 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
}
case bitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, synchscope]
if (2 != Record.size())
- return Error("Invalid record");
- AtomicOrdering Ordering = GetDecodedOrdering(Record[0]);
+ return error("Invalid record");
+ AtomicOrdering Ordering = getDecodedOrdering(Record[0]);
if (Ordering == NotAtomic || Ordering == Unordered ||
Ordering == Monotonic)
- return Error("Invalid record");
- SynchronizationScope SynchScope = GetDecodedSynchScope(Record[1]);
+ return error("Invalid record");
+ SynchronizationScope SynchScope = getDecodedSynchScope(Record[1]);
I = new FenceInst(Context, Ordering, SynchScope);
InstructionList.push_back(I);
break;
@@ -4292,7 +4275,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
case bitc::FUNC_CODE_INST_CALL: {
// CALL: [paramattrs, cc, fnty, fnid, arg0, arg1...]
if (Record.size() < 3)
- return Error("Invalid record");
+ return error("Invalid record");
unsigned OpNum = 0;
AttributeSet PAL = getAttributes(Record[OpNum++]);
@@ -4301,24 +4284,24 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
FunctionType *FTy = nullptr;
if (CCInfo >> 15 & 1 &&
!(FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++]))))
- return Error("Explicit call type is not a function type");
+ return error("Explicit call type is not a function type");
Value *Callee;
if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
- return Error("Invalid record");
+ return error("Invalid record");
PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
if (!OpTy)
- return Error("Callee is not a pointer type");
+ return error("Callee is not a pointer type");
if (!FTy) {
FTy = dyn_cast<FunctionType>(OpTy->getElementType());
if (!FTy)
- return Error("Callee is not of pointer to function type");
+ return error("Callee is not of pointer to function type");
} else if (OpTy->getElementType() != FTy)
- return Error("Explicit call type does not match pointee type of "
+ return error("Explicit call type does not match pointee type of "
"callee operand");
if (Record.size() < FTy->getNumParams() + OpNum)
- return Error("Insufficient operands to call");
+ return error("Insufficient operands to call");
SmallVector<Value*, 16> Args;
// Read the fixed params.
@@ -4329,18 +4312,18 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
Args.push_back(getValue(Record, OpNum, NextValueNo,
FTy->getParamType(i)));
if (!Args.back())
- return Error("Invalid record");
+ return error("Invalid record");
}
// Read type/value pairs for varargs params.
if (!FTy->isVarArg()) {
if (OpNum != Record.size())
- return Error("Invalid record");
+ return error("Invalid record");
} else {
while (OpNum != Record.size()) {
Value *Op;
if (getValueTypePair(Record, OpNum, NextValueNo, Op))
- return Error("Invalid record");
+ return error("Invalid record");
Args.push_back(Op);
}
}
@@ -4360,12 +4343,12 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
}
case bitc::FUNC_CODE_INST_VAARG: { // VAARG: [valistty, valist, instty]
if (Record.size() < 3)
- return Error("Invalid record");
+ return error("Invalid record");
Type *OpTy = getTypeByID(Record[0]);
Value *Op = getValue(Record, 1, NextValueNo, OpTy);
Type *ResTy = getTypeByID(Record[2]);
if (!OpTy || !Op || !ResTy)
- return Error("Invalid record");
+ return error("Invalid record");
I = new VAArgInst(Op, ResTy);
InstructionList.push_back(I);
break;
@@ -4376,7 +4359,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
// this file.
if (!CurBB) {
delete I;
- return Error("Invalid instruction with no BB");
+ return error("Invalid instruction with no BB");
}
CurBB->getInstList().push_back(I);
@@ -4388,7 +4371,7 @@ std::error_code BitcodeReader::ParseFunctionBody(Function *F) {
// Non-void values get registered in the value table for future use.
if (I && !I->getType()->isVoidTy())
- ValueList.AssignValue(I, NextValueNo++);
+ ValueList.assignValue(I, NextValueNo++);
}
OutOfRecordLoop:
@@ -4403,7 +4386,7 @@ OutOfRecordLoop:
delete A;
}
}
- return Error("Never resolved value found in function");
+ return error("Never resolved value found in function");
}
}
@@ -4418,15 +4401,15 @@ OutOfRecordLoop:
}
/// Find the function body in the bitcode stream
-std::error_code BitcodeReader::FindFunctionInStream(
+std::error_code BitcodeReader::findFunctionInStream(
Function *F,
DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator) {
while (DeferredFunctionInfoIterator->second == 0) {
if (Stream.AtEndOfStream())
- return Error("Could not find function in stream");
+ return error("Could not find function in stream");
// parseModule will parse the next body in the stream and set its
// position in the DeferredFunctionInfo map.
- if (std::error_code EC = ParseModule(true))
+ if (std::error_code EC = parseModule(true))
return EC;
}
return std::error_code();
@@ -4451,14 +4434,14 @@ std::error_code BitcodeReader::materialize(GlobalValue *GV) {
assert(DFII != DeferredFunctionInfo.end() && "Deferred function not found!");
// If its position is recorded as 0, its body is somewhere in the stream
// but we haven't seen it yet.
- if (DFII->second == 0 && LazyStreamer)
- if (std::error_code EC = FindFunctionInStream(F, DFII))
+ if (DFII->second == 0 && IsStreamed)
+ if (std::error_code EC = findFunctionInStream(F, DFII))
return EC;
// Move the bit stream to the saved position of the deferred function body.
Stream.JumpToBit(DFII->second);
- if (std::error_code EC = ParseFunctionBody(F))
+ if (std::error_code EC = parseFunctionBody(F))
return EC;
F->setIsMaterializable(false);
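The two hunks above are the heart of the reader's lazy path: a DeferredFunctionInfo entry of 0 means the function's body has not yet been seen in the stream, so the reader keeps parsing module blocks until the entry gains a real bit offset, then materialize() jumps there. A toy reproduction of that bookkeeping, with illustrative names rather than the real members:

    // Sketch only: mirrors the deferred-body loop, not the actual reader.
    #include <cstdint>
    #include <iostream>
    #include <map>

    struct Fn { const char *Name; };

    int main() {
      Fn F{"foo"};
      std::map<Fn *, uint64_t> Positions = {{&F, 0}}; // 0 => body not seen yet
      uint64_t StreamCursor = 0;
      // Consume blocks until the deferred entry is filled in -- the same
      // loop findFunctionInStream runs above, minus the end-of-stream error.
      while (Positions[&F] == 0) {
        StreamCursor += 64;          // pretend we parsed one module block
        if (StreamCursor == 192)     // pretend block 3 held foo's body
          Positions[&F] = StreamCursor;
      }
      // materialize() would now JumpToBit(Positions[&F]) and parse the body.
      std::cout << F.Name << " body at bit " << Positions[&F] << "\n";
    }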
@@ -4529,12 +4512,12 @@ std::error_code BitcodeReader::materializeModule(Module *M) {
// pointing to the END_BLOCK record after them. Now make sure the rest
// of the bits in the module have been read.
if (NextUnreadBit)
- ParseModule(true);
+ parseModule(true);
// Check that all block address forward references got resolved (as we
// promised above).
if (!BasicBlockFwdRefs.empty())
- return Error("Never resolved function from blockaddress");
+ return error("Never resolved function from blockaddress");
// Upgrade any intrinsic calls that slipped through (should not happen!) and
// delete the old functions to clean up. We can't do this unless the entire
@@ -4566,24 +4549,25 @@ std::vector<StructType *> BitcodeReader::getIdentifiedStructTypes() const {
return IdentifiedStructTypes;
}
-std::error_code BitcodeReader::InitStream() {
- if (LazyStreamer)
- return InitLazyStream();
- return InitStreamFromBuffer();
+std::error_code
+BitcodeReader::initStream(std::unique_ptr<DataStreamer> Streamer) {
+ if (Streamer)
+ return initLazyStream(std::move(Streamer));
+ return initStreamFromBuffer();
}
-std::error_code BitcodeReader::InitStreamFromBuffer() {
+std::error_code BitcodeReader::initStreamFromBuffer() {
const unsigned char *BufPtr = (const unsigned char*)Buffer->getBufferStart();
const unsigned char *BufEnd = BufPtr+Buffer->getBufferSize();
if (Buffer->getBufferSize() & 3)
- return Error("Invalid bitcode signature");
+ return error("Invalid bitcode signature");
// If we have a wrapper header, parse it and ignore the non-bc file contents.
// The magic number is 0x0B17C0DE stored in little endian.
if (isBitcodeWrapper(BufPtr, BufEnd))
if (SkipBitcodeWrapperHeader(BufPtr, BufEnd, true))
- return Error("Invalid bitcode wrapper header");
+ return error("Invalid bitcode wrapper header");
StreamFile.reset(new BitstreamReader(BufPtr, BufEnd));
Stream.init(&*StreamFile);
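As the comment notes, a wrapped bitcode file leads with 0x0B17C0DE in little-endian order, while a bare stream starts with 'B' 'C' 0xC0 0xDE; the real checks live in isBitcodeWrapper()/isBitcode(). A standalone sketch of the two byte tests (magic values from the text above, helper names ours):

    #include <cstdint>
    #include <cstdio>

    static bool isWrapperMagic(const uint8_t *B) {
      // 0x0B17C0DE stored little-endian: DE C0 17 0B on disk.
      return B[0] == 0xDE && B[1] == 0xC0 && B[2] == 0x17 && B[3] == 0x0B;
    }

    static bool isRawBitcodeMagic(const uint8_t *B) {
      return B[0] == 'B' && B[1] == 'C' && B[2] == 0xC0 && B[3] == 0xDE;
    }

    int main() {
      const uint8_t Wrapped[] = {0xDE, 0xC0, 0x17, 0x0B};
      const uint8_t Raw[] = {'B', 'C', 0xC0, 0xDE};
      std::printf("wrapper: %d raw: %d\n", isWrapperMagic(Wrapped),
                  isRawBitcodeMagic(Raw));
    }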
@@ -4591,20 +4575,22 @@ std::error_code BitcodeReader::InitStreamFromBuffer() {
return std::error_code();
}
-std::error_code BitcodeReader::InitLazyStream() {
+std::error_code
+BitcodeReader::initLazyStream(std::unique_ptr<DataStreamer> Streamer) {
// Check and strip off the bitcode wrapper; BitstreamReader expects never to
// see it.
- auto OwnedBytes = llvm::make_unique<StreamingMemoryObject>(LazyStreamer);
+ auto OwnedBytes =
+ llvm::make_unique<StreamingMemoryObject>(std::move(Streamer));
StreamingMemoryObject &Bytes = *OwnedBytes;
StreamFile = llvm::make_unique<BitstreamReader>(std::move(OwnedBytes));
Stream.init(&*StreamFile);
unsigned char buf[16];
if (Bytes.readBytes(buf, 16, 0) != 16)
- return Error("Invalid bitcode signature");
+ return error("Invalid bitcode signature");
if (!isBitcode(buf, buf + 16))
- return Error("Invalid bitcode signature");
+ return error("Invalid bitcode signature");
if (isBitcodeWrapper(buf, buf + 4)) {
const unsigned char *bitcodeStart = buf;
@@ -4632,7 +4618,7 @@ class BitcodeErrorCategoryType : public std::error_category {
llvm_unreachable("Unknown error type!");
}
};
-}
+} // namespace
static ManagedStatic<BitcodeErrorCategoryType> ErrorCategory;
@@ -4644,83 +4630,86 @@ const std::error_category &llvm::BitcodeErrorCategory() {
// External interface
//===----------------------------------------------------------------------===//
-/// \brief Get a lazy one-at-time loading module from bitcode.
-///
-/// This isn't always used in a lazy context. In particular, it's also used by
-/// \a parseBitcodeFile(). If this is truly lazy, then we need to eagerly pull
-/// in forward-referenced functions from block address references.
-///
-/// \param[in] WillMaterializeAll Set to \c true if the caller promises to
-/// materialize everything -- in particular, if this isn't truly lazy.
-static ErrorOr<Module *>
-getLazyBitcodeModuleImpl(std::unique_ptr<MemoryBuffer> &&Buffer,
- LLVMContext &Context, bool WillMaterializeAll,
- DiagnosticHandlerFunction DiagnosticHandler,
- bool ShouldLazyLoadMetadata = false) {
- Module *M = new Module(Buffer->getBufferIdentifier(), Context);
- BitcodeReader *R =
- new BitcodeReader(Buffer.get(), Context, DiagnosticHandler);
+static ErrorOr<std::unique_ptr<Module>>
+getBitcodeModuleImpl(std::unique_ptr<DataStreamer> Streamer, StringRef Name,
+ BitcodeReader *R, LLVMContext &Context,
+ bool MaterializeAll, bool ShouldLazyLoadMetadata) {
+ std::unique_ptr<Module> M = make_unique<Module>(Name, Context);
M->setMaterializer(R);
auto cleanupOnError = [&](std::error_code EC) {
R->releaseBuffer(); // Never take ownership on error.
- delete M; // Also deletes R.
return EC;
};
// Delay parsing Metadata if ShouldLazyLoadMetadata is true.
- if (std::error_code EC = R->ParseBitcodeInto(M, ShouldLazyLoadMetadata))
+ if (std::error_code EC = R->parseBitcodeInto(std::move(Streamer), M.get(),
+ ShouldLazyLoadMetadata))
return cleanupOnError(EC);
- if (!WillMaterializeAll)
+ if (MaterializeAll) {
+ // Read in the entire module, and destroy the BitcodeReader.
+ if (std::error_code EC = M->materializeAllPermanently())
+ return cleanupOnError(EC);
+ } else {
// Resolve forward references from blockaddresses.
if (std::error_code EC = R->materializeForwardReferencedFunctions())
return cleanupOnError(EC);
+ }
+ return std::move(M);
+}
+
+/// \brief Get a lazy one-at-a-time loading module from bitcode.
+///
+/// This isn't always used in a lazy context. In particular, it's also used by
+/// \a parseBitcodeFile(). If this is truly lazy, then we need to eagerly pull
+/// in forward-referenced functions from block address references.
+///
+/// \param[in] MaterializeAll Set to \c true if we should materialize
+/// everything.
+static ErrorOr<std::unique_ptr<Module>>
+getLazyBitcodeModuleImpl(std::unique_ptr<MemoryBuffer> &&Buffer,
+ LLVMContext &Context, bool MaterializeAll,
+ DiagnosticHandlerFunction DiagnosticHandler,
+ bool ShouldLazyLoadMetadata = false) {
+ BitcodeReader *R =
+ new BitcodeReader(Buffer.get(), Context, DiagnosticHandler);
+
+ ErrorOr<std::unique_ptr<Module>> Ret =
+ getBitcodeModuleImpl(nullptr, Buffer->getBufferIdentifier(), R, Context,
+ MaterializeAll, ShouldLazyLoadMetadata);
+ if (!Ret)
+ return Ret;
Buffer.release(); // The BitcodeReader owns it now.
- return M;
+ return Ret;
}
-ErrorOr<Module *>
-llvm::getLazyBitcodeModule(std::unique_ptr<MemoryBuffer> &&Buffer,
- LLVMContext &Context,
- DiagnosticHandlerFunction DiagnosticHandler,
- bool ShouldLazyLoadMetadata) {
+ErrorOr<std::unique_ptr<Module>> llvm::getLazyBitcodeModule(
+ std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context,
+ DiagnosticHandlerFunction DiagnosticHandler, bool ShouldLazyLoadMetadata) {
return getLazyBitcodeModuleImpl(std::move(Buffer), Context, false,
DiagnosticHandler, ShouldLazyLoadMetadata);
}
-ErrorOr<std::unique_ptr<Module>>
-llvm::getStreamedBitcodeModule(StringRef Name, DataStreamer *Streamer,
- LLVMContext &Context,
- DiagnosticHandlerFunction DiagnosticHandler) {
+ErrorOr<std::unique_ptr<Module>> llvm::getStreamedBitcodeModule(
+ StringRef Name, std::unique_ptr<DataStreamer> Streamer,
+ LLVMContext &Context, DiagnosticHandlerFunction DiagnosticHandler) {
std::unique_ptr<Module> M = make_unique<Module>(Name, Context);
- BitcodeReader *R = new BitcodeReader(Streamer, Context, DiagnosticHandler);
- M->setMaterializer(R);
- if (std::error_code EC = R->ParseBitcodeInto(M.get()))
- return EC;
- return std::move(M);
+ BitcodeReader *R = new BitcodeReader(Context, DiagnosticHandler);
+
+ return getBitcodeModuleImpl(std::move(Streamer), Name, R, Context, false,
+ false);
}
-ErrorOr<Module *>
+ErrorOr<std::unique_ptr<Module>>
llvm::parseBitcodeFile(MemoryBufferRef Buffer, LLVMContext &Context,
DiagnosticHandlerFunction DiagnosticHandler) {
std::unique_ptr<MemoryBuffer> Buf = MemoryBuffer::getMemBuffer(Buffer, false);
- ErrorOr<Module *> ModuleOrErr = getLazyBitcodeModuleImpl(
- std::move(Buf), Context, true, DiagnosticHandler);
- if (!ModuleOrErr)
- return ModuleOrErr;
- Module *M = ModuleOrErr.get();
- // Read in the entire module, and destroy the BitcodeReader.
- if (std::error_code EC = M->materializeAllPermanently()) {
- delete M;
- return EC;
- }
-
+ return getLazyBitcodeModuleImpl(std::move(Buf), Context, true,
+ DiagnosticHandler);
// TODO: Restore the use-lists to the in-memory state when the bitcode was
// written. We must defer until the Module has been fully materialized.
-
- return M;
}
std::string
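The caller-visible upshot of this block of changes: every entry point now returns ErrorOr<std::unique_ptr<Module>>, and the eager path is just the lazy path with MaterializeAll set, so the old manual delete-on-error dance disappears. A plausible caller against headers of this vintage (the file loading and no-op diagnostic handler are illustrative, not part of the patch):

    #include "llvm/Bitcode/ReaderWriter.h"
    #include "llvm/IR/DiagnosticInfo.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/MemoryBuffer.h"
    using namespace llvm;

    static ErrorOr<std::unique_ptr<Module>> loadBC(StringRef Path,
                                                   LLVMContext &Ctx) {
      auto BufOrErr = MemoryBuffer::getFile(Path);
      if (std::error_code EC = BufOrErr.getError())
        return EC;
      // Eager load: materializes everything and destroys the BitcodeReader;
      // on error the unique_ptr cleans up instead of a manual `delete M`.
      return parseBitcodeFile((*BufOrErr)->getMemBufferRef(), Ctx,
                              [](const DiagnosticInfo &) {});
    }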
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index 97caefb4c494..e79eeb079ed8 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -232,6 +232,8 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
return bitc::ATTR_KIND_STACK_PROTECT_REQ;
case Attribute::StackProtectStrong:
return bitc::ATTR_KIND_STACK_PROTECT_STRONG;
+ case Attribute::SafeStack:
+ return bitc::ATTR_KIND_SAFESTACK;
case Attribute::StructRet:
return bitc::ATTR_KIND_STRUCT_RET;
case Attribute::SanitizeAddress:
@@ -693,7 +695,7 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
for (const Function &F : *M) {
// FUNCTION: [type, callingconv, isproto, linkage, paramattrs, alignment,
// section, visibility, gc, unnamed_addr, prologuedata,
- // dllstorageclass, comdat, prefixdata]
+ // dllstorageclass, comdat, prefixdata, personalityfn]
Vals.push_back(VE.getTypeID(F.getFunctionType()));
Vals.push_back(F.getCallingConv());
Vals.push_back(F.isDeclaration());
@@ -710,6 +712,8 @@ static void WriteModuleInfo(const Module *M, const ValueEnumerator &VE,
Vals.push_back(F.hasComdat() ? VE.getComdatID(F.getComdat()) : 0);
Vals.push_back(F.hasPrefixData() ? (VE.getValueID(F.getPrefixData()) + 1)
: 0);
+ Vals.push_back(
+ F.hasPersonalityFn() ? (VE.getValueID(F.getPersonalityFn()) + 1) : 0);
unsigned AbbrevToUse = 0;
Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals, AbbrevToUse);
@@ -1857,7 +1861,6 @@ static void WriteInstruction(const Instruction &I, unsigned InstID,
const LandingPadInst &LP = cast<LandingPadInst>(I);
Code = bitc::FUNC_CODE_INST_LANDINGPAD;
Vals.push_back(VE.getTypeID(LP.getType()));
- PushValueAndType(LP.getPersonalityFn(), InstID, Vals, VE);
Vals.push_back(LP.isCleanup());
Vals.push_back(LP.getNumClauses());
for (unsigned I = 0, E = LP.getNumClauses(); I != E; ++I) {
@@ -2403,10 +2406,7 @@ enum {
static void WriteInt32ToBuffer(uint32_t Value, SmallVectorImpl<char> &Buffer,
uint32_t &Position) {
- Buffer[Position + 0] = (unsigned char) (Value >> 0);
- Buffer[Position + 1] = (unsigned char) (Value >> 8);
- Buffer[Position + 2] = (unsigned char) (Value >> 16);
- Buffer[Position + 3] = (unsigned char) (Value >> 24);
+ support::endian::write32le(&Buffer[Position], Value);
Position += 4;
}
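The four hand-rolled shifts and the new support::endian::write32le() spell the same little-endian store; the helper simply centralizes it. The equivalence in dependency-free C++ (write32leManual reproduces the deleted lines):

    #include <cstdint>
    #include <cstring>

    static void write32leManual(uint8_t *P, uint32_t V) {
      P[0] = uint8_t(V >> 0); // exactly the four assignments the hunk removes
      P[1] = uint8_t(V >> 8);
      P[2] = uint8_t(V >> 16);
      P[3] = uint8_t(V >> 24);
    }

    int main() {
      uint8_t A[4];
      const uint8_t Expect[4] = {0x44, 0x33, 0x22, 0x11}; // 0x11223344 in LE
      write32leManual(A, 0x11223344);
      return std::memcmp(A, Expect, 4) != 0; // exits 0: identical layout
    }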
diff --git a/lib/Bitcode/Writer/BitcodeWriterPass.cpp b/lib/Bitcode/Writer/BitcodeWriterPass.cpp
index 3165743576ec..c890380e07df 100644
--- a/lib/Bitcode/Writer/BitcodeWriterPass.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriterPass.cpp
@@ -41,7 +41,7 @@ namespace {
return false;
}
};
-}
+} // namespace
char WriteBitcodePass::ID = 0;
diff --git a/lib/Bitcode/Writer/CMakeLists.txt b/lib/Bitcode/Writer/CMakeLists.txt
index f097b097c337..82dc6b24137d 100644
--- a/lib/Bitcode/Writer/CMakeLists.txt
+++ b/lib/Bitcode/Writer/CMakeLists.txt
@@ -3,4 +3,7 @@ add_llvm_library(LLVMBitWriter
BitcodeWriter.cpp
BitcodeWriterPass.cpp
ValueEnumerator.cpp
+
+ DEPENDS
+ intrinsics_gen
)
diff --git a/lib/Bitcode/Writer/ValueEnumerator.cpp b/lib/Bitcode/Writer/ValueEnumerator.cpp
index 6c517f5ed8d0..53c3a4094ea6 100644
--- a/lib/Bitcode/Writer/ValueEnumerator.cpp
+++ b/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -52,7 +52,7 @@ struct OrderMap {
IDs[V].first = ID;
}
};
-}
+} // namespace
static void orderValue(const Value *V, OrderMap &OM) {
if (OM.lookup(V).first)
@@ -93,6 +93,9 @@ static OrderMap orderModule(const Module &M) {
if (F.hasPrologueData())
if (!isa<GlobalValue>(F.getPrologueData()))
orderValue(F.getPrologueData(), OM);
+ if (F.hasPersonalityFn())
+ if (!isa<GlobalValue>(F.getPersonalityFn()))
+ orderValue(F.getPersonalityFn(), OM);
}
OM.LastGlobalConstantID = OM.size();
@@ -274,6 +277,8 @@ static UseListOrderStack predictUseListOrder(const Module &M) {
predictValueUseListOrder(F.getPrefixData(), nullptr, OM, Stack);
if (F.hasPrologueData())
predictValueUseListOrder(F.getPrologueData(), nullptr, OM, Stack);
+ if (F.hasPersonalityFn())
+ predictValueUseListOrder(F.getPersonalityFn(), nullptr, OM, Stack);
}
return Stack;
@@ -291,44 +296,45 @@ ValueEnumerator::ValueEnumerator(const Module &M,
UseListOrders = predictUseListOrder(M);
// Enumerate the global variables.
- for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I)
- EnumerateValue(I);
+ for (const GlobalVariable &GV : M.globals())
+ EnumerateValue(&GV);
// Enumerate the functions.
- for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I) {
- EnumerateValue(I);
- EnumerateAttributes(cast<Function>(I)->getAttributes());
+ for (const Function &F : M) {
+ EnumerateValue(&F);
+ EnumerateAttributes(F.getAttributes());
}
// Enumerate the aliases.
- for (Module::const_alias_iterator I = M.alias_begin(), E = M.alias_end();
- I != E; ++I)
- EnumerateValue(I);
+ for (const GlobalAlias &GA : M.aliases())
+ EnumerateValue(&GA);
// Remember what is the cutoff between globalvalue's and other constants.
unsigned FirstConstant = Values.size();
// Enumerate the global variable initializers.
- for (Module::const_global_iterator I = M.global_begin(), E = M.global_end();
- I != E; ++I)
- if (I->hasInitializer())
- EnumerateValue(I->getInitializer());
+ for (const GlobalVariable &GV : M.globals())
+ if (GV.hasInitializer())
+ EnumerateValue(GV.getInitializer());
// Enumerate the aliasees.
- for (Module::const_alias_iterator I = M.alias_begin(), E = M.alias_end();
- I != E; ++I)
- EnumerateValue(I->getAliasee());
+ for (const GlobalAlias &GA : M.aliases())
+ EnumerateValue(GA.getAliasee());
// Enumerate the prefix data constants.
- for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I)
- if (I->hasPrefixData())
- EnumerateValue(I->getPrefixData());
+ for (const Function &F : M)
+ if (F.hasPrefixData())
+ EnumerateValue(F.getPrefixData());
// Enumerate the prologue data constants.
+ for (const Function &F : M)
+ if (F.hasPrologueData())
+ EnumerateValue(F.getPrologueData());
+
+ // Enumerate the personality functions.
for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I)
- if (I->hasPrologueData())
- EnumerateValue(I->getPrologueData());
+ if (I->hasPersonalityFn())
+ EnumerateValue(I->getPersonalityFn());
// Enumerate the metadata type.
//
diff --git a/lib/Bitcode/Writer/ValueEnumerator.h b/lib/Bitcode/Writer/ValueEnumerator.h
index 92d166e3ba92..b2daa48f1357 100644
--- a/lib/Bitcode/Writer/ValueEnumerator.h
+++ b/lib/Bitcode/Writer/ValueEnumerator.h
@@ -203,6 +203,6 @@ private:
void EnumerateNamedMetadata(const Module &M);
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index ce10998768d1..d00c10f5802f 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -19,3 +19,4 @@ add_subdirectory(LineEditor)
add_subdirectory(ProfileData)
add_subdirectory(Fuzzer)
add_subdirectory(Passes)
+add_subdirectory(LibDriver)
diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.h b/lib/CodeGen/AggressiveAntiDepBreaker.h
index 18c8bb591c1c..63d2085148b6 100644
--- a/lib/CodeGen/AggressiveAntiDepBreaker.h
+++ b/lib/CodeGen/AggressiveAntiDepBreaker.h
@@ -174,6 +174,6 @@ class RegisterClassInfo;
RenameOrderType& RenameOrder,
std::map<unsigned, unsigned> &RenameMap);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AntiDepBreaker.h b/lib/CodeGen/AntiDepBreaker.h
index a61a8efa4da0..7985241c6635 100644
--- a/lib/CodeGen/AntiDepBreaker.h
+++ b/lib/CodeGen/AntiDepBreaker.h
@@ -62,6 +62,6 @@ public:
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/AddressPool.h b/lib/CodeGen/AsmPrinter/AddressPool.h
index 211fc98c7f6f..e0ce3f90bc34 100644
--- a/lib/CodeGen/AsmPrinter/AddressPool.h
+++ b/lib/CodeGen/AsmPrinter/AddressPool.h
@@ -48,5 +48,5 @@ public:
void resetUsedFlag() { HasBeenUsed = false; }
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 2e3b83a09520..95da5887658e 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -151,7 +151,7 @@ void AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
}
StringRef AsmPrinter::getTargetTriple() const {
- return TM.getTargetTriple();
+ return TM.getTargetTriple().str();
}
/// getCurrentSection() - Return the current section we are emitting to.
@@ -172,7 +172,6 @@ void AsmPrinter::getAnalysisUsage(AnalysisUsage &AU) const {
bool AsmPrinter::doInitialization(Module &M) {
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
- MMI->AnalyzeModule(M);
// Initialize TargetLoweringObjectFile.
const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
@@ -222,7 +221,8 @@ bool AsmPrinter::doInitialization(Module &M) {
// We're at the module level. Construct MCSubtarget from the default CPU
// and target triple.
std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo(
- TM.getTargetTriple(), TM.getTargetCPU(), TM.getTargetFeatureString()));
+ TM.getTargetTriple().str(), TM.getTargetCPU(),
+ TM.getTargetFeatureString()));
OutStreamer->AddComment("Start of file scope inline assembly");
OutStreamer->AddBlankLine();
EmitInlineAsm(M.getModuleInlineAsm()+"\n", *STI, TM.Options.MCOptions);
@@ -232,7 +232,7 @@ bool AsmPrinter::doInitialization(Module &M) {
if (MAI->doesSupportDebugInformation()) {
bool skip_dwarf = false;
- if (Triple(TM.getTargetTriple()).isKnownWindowsMSVCEnvironment()) {
+ if (TM.getTargetTriple().isKnownWindowsMSVCEnvironment()) {
Handlers.push_back(HandlerInfo(new WinCodeViewLineTables(this),
DbgTimerName,
CodeViewLineTablesGroupName));
@@ -900,12 +900,11 @@ void AsmPrinter::EmitFunctionBody() {
if (MAI->hasDotTypeDotSizeDirective()) {
// We can get the size as difference between the function label and the
// temp label.
- const MCExpr *SizeExp =
- MCBinaryExpr::createSub(MCSymbolRefExpr::create(CurrentFnEnd, OutContext),
- MCSymbolRefExpr::create(CurrentFnSymForSize,
- OutContext),
- OutContext);
- OutStreamer->emitELFSize(cast<MCSymbolELF>(CurrentFnSym), SizeExp);
+ const MCExpr *SizeExp = MCBinaryExpr::createSub(
+ MCSymbolRefExpr::create(CurrentFnEnd, OutContext),
+ MCSymbolRefExpr::create(CurrentFnSymForSize, OutContext), OutContext);
+ if (auto Sym = dyn_cast<MCSymbolELF>(CurrentFnSym))
+ OutStreamer->emitELFSize(Sym, SizeExp);
}
for (const HandlerInfo &HI : Handlers) {
@@ -1043,8 +1042,7 @@ bool AsmPrinter::doFinalization(Module &M) {
if (!ModuleFlags.empty())
TLOF.emitModuleFlags(*OutStreamer, ModuleFlags, *Mang, TM);
- Triple TT(TM.getTargetTriple());
- if (TT.isOSBinFormatELF()) {
+ if (TM.getTargetTriple().isOSBinFormatELF()) {
MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
// Output stubs for external and common global variables.
@@ -1591,25 +1589,7 @@ void AsmPrinter::EmitInt32(int Value) const {
/// .set if it avoids relocations.
void AsmPrinter::EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
unsigned Size) const {
- if (!MAI->doesDwarfUseRelocationsAcrossSections())
- if (OutStreamer->emitAbsoluteSymbolDiff(Hi, Lo, Size))
- return;
-
- // Get the Hi-Lo expression.
- const MCExpr *Diff =
- MCBinaryExpr::createSub(MCSymbolRefExpr::create(Hi, OutContext),
- MCSymbolRefExpr::create(Lo, OutContext),
- OutContext);
-
- if (!MAI->doesSetDirectiveSuppressesReloc()) {
- OutStreamer->EmitValue(Diff, Size);
- return;
- }
-
- // Otherwise, emit with .set (aka assignment).
- MCSymbol *SetLabel = createTempSymbol("set");
- OutStreamer->EmitAssignment(SetLabel, Diff);
- OutStreamer->EmitSymbolValue(SetLabel, Size);
+ OutStreamer->emitAbsoluteSymbolDiff(Hi, Lo, Size);
}
/// EmitLabelPlusOffset - Emit something like ".long Label+Offset"
@@ -1811,40 +1791,30 @@ static int isRepeatedByteSequence(const ConstantDataSequential *V) {
/// composed of a repeated sequence of identical bytes and return the
/// byte value. If it is not a repeated sequence, return -1.
static int isRepeatedByteSequence(const Value *V, TargetMachine &TM) {
-
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- if (CI->getBitWidth() > 64) return -1;
+ uint64_t Size = TM.getDataLayout()->getTypeAllocSizeInBits(V->getType());
+ assert(Size % 8 == 0);
- uint64_t Size =
- TM.getDataLayout()->getTypeAllocSize(V->getType());
- uint64_t Value = CI->getZExtValue();
+ // Extend the element to take zero padding into account.
+ APInt Value = CI->getValue().zextOrSelf(Size);
+ if (!Value.isSplat(8))
+ return -1;
- // Make sure the constant is at least 8 bits long and has a power
- // of 2 bit width. This guarantees the constant bit width is
- // always a multiple of 8 bits, avoiding issues with padding out
- // to Size and other such corner cases.
- if (CI->getBitWidth() < 8 || !isPowerOf2_64(CI->getBitWidth())) return -1;
-
- uint8_t Byte = static_cast<uint8_t>(Value);
-
- for (unsigned i = 1; i < Size; ++i) {
- Value >>= 8;
- if (static_cast<uint8_t>(Value) != Byte) return -1;
- }
- return Byte;
+ return Value.zextOrTrunc(8).getZExtValue();
}
if (const ConstantArray *CA = dyn_cast<ConstantArray>(V)) {
// Make sure all array elements are sequences of the same repeated
// byte.
assert(CA->getNumOperands() != 0 && "Should be a CAZ");
- int Byte = isRepeatedByteSequence(CA->getOperand(0), TM);
- if (Byte == -1) return -1;
-
- for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i) {
- int ThisByte = isRepeatedByteSequence(CA->getOperand(i), TM);
- if (ThisByte == -1) return -1;
- if (Byte != ThisByte) return -1;
- }
+ Constant *Op0 = CA->getOperand(0);
+ int Byte = isRepeatedByteSequence(Op0, TM);
+ if (Byte == -1)
+ return -1;
+
+ // All array elements must be equal.
+ for (unsigned i = 1, e = CA->getNumOperands(); i != e; ++i)
+ if (CA->getOperand(i) != Op0)
+ return -1;
return Byte;
}
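The rewrite leans on APInt for the splat test: the constant is zero-extended to its alloc size so zero padding participates, isSplat(8) asks whether every 8-bit slice repeats, and the low byte is the result. A small demonstration of just that predicate:

    #include "llvm/ADT/APInt.h"
    #include <cstdio>
    using namespace llvm;

    int main() {
      APInt Splat(32, 0xABABABABu);    // every byte identical
      APInt NotSplat(32, 0xABABAB00u); // low byte differs
      std::printf("splat: %d not: %d byte: 0x%llx\n", Splat.isSplat(8),
                  NotSplat.isSplat(8),
                  (unsigned long long)Splat.zextOrTrunc(8).getZExtValue());
    }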
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
index 7dbfddf60691..8ee613bcdb43 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
@@ -157,24 +157,20 @@ void AsmPrinter::EmitTTypeReference(const GlobalValue *GV,
OutStreamer->EmitIntValue(0, GetSizeOfEncodedValue(Encoding));
}
-/// EmitSectionOffset - Emit the 4-byte offset of Label from the start of its
-/// section. This can be done with a special directive if the target supports
-/// it (e.g. cygwin) or by emitting it as an offset from a label at the start
-/// of the section.
-///
-/// SectionLabel is a temporary label emitted at the start of the section that
-/// Label lives in.
-void AsmPrinter::emitSectionOffset(const MCSymbol *Label) const {
- // On COFF targets, we have to emit the special .secrel32 directive.
- if (MAI->needsDwarfSectionOffsetDirective()) {
- OutStreamer->EmitCOFFSecRel32(Label);
- return;
- }
+void AsmPrinter::emitDwarfSymbolReference(const MCSymbol *Label,
+ bool ForceOffset) const {
+ if (!ForceOffset) {
+ // On COFF targets, we have to emit the special .secrel32 directive.
+ if (MAI->needsDwarfSectionOffsetDirective()) {
+ OutStreamer->EmitCOFFSecRel32(Label);
+ return;
+ }
- // If the format uses relocations with dwarf, refer to the symbol directly.
- if (MAI->doesDwarfUseRelocationsAcrossSections()) {
- OutStreamer->EmitSymbolValue(Label, 4);
- return;
+ // If the format uses relocations with dwarf, refer to the symbol directly.
+ if (MAI->doesDwarfUseRelocationsAcrossSections()) {
+ OutStreamer->EmitSymbolValue(Label, 4);
+ return;
+ }
}
// Otherwise, emit it as a label difference from the start of the section.
@@ -183,7 +179,7 @@ void AsmPrinter::emitSectionOffset(const MCSymbol *Label) const {
void AsmPrinter::emitDwarfStringOffset(DwarfStringPoolEntryRef S) const {
if (MAI->doesDwarfUseRelocationsAcrossSections()) {
- emitSectionOffset(S.getSymbol());
+ emitDwarfSymbolReference(S.getSymbol());
return;
}
diff --git a/lib/CodeGen/AsmPrinter/ByteStreamer.h b/lib/CodeGen/AsmPrinter/ByteStreamer.h
index 0cc829fffc54..7a712a076dd9 100644
--- a/lib/CodeGen/AsmPrinter/ByteStreamer.h
+++ b/lib/CodeGen/AsmPrinter/ByteStreamer.h
@@ -103,6 +103,6 @@ public:
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DIE.cpp b/lib/CodeGen/AsmPrinter/DIE.cpp
index fa8449e94c9f..4847de45789b 100644
--- a/lib/CodeGen/AsmPrinter/DIE.cpp
+++ b/lib/CodeGen/AsmPrinter/DIE.cpp
@@ -618,11 +618,7 @@ unsigned DIELocList::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
void DIELocList::EmitValue(const AsmPrinter *AP, dwarf::Form Form) const {
DwarfDebug *DD = AP->getDwarfDebug();
MCSymbol *Label = DD->getDebugLocs().getList(Index).Label;
-
- if (AP->MAI->doesDwarfUseRelocationsAcrossSections() && !DD->useSplitDwarf())
- AP->emitSectionOffset(Label);
- else
- AP->EmitLabelDifference(Label, Label->getSection().getBeginSymbol(), 4);
+ AP->emitDwarfSymbolReference(Label, /*ForceOffset*/ DD->useSplitDwarf());
}
#ifndef NDEBUG
diff --git a/lib/CodeGen/AsmPrinter/DIEHash.h b/lib/CodeGen/AsmPrinter/DIEHash.h
index 1850e042f924..789e6dd91e01 100644
--- a/lib/CodeGen/AsmPrinter/DIEHash.h
+++ b/lib/CodeGen/AsmPrinter/DIEHash.h
@@ -157,6 +157,6 @@ private:
AsmPrinter *AP;
DenseMap<const DIE *, unsigned> Numbering;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h b/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h
index 546d1b443781..5d4005018013 100644
--- a/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h
+++ b/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.h
@@ -55,6 +55,6 @@ public:
void calculateDbgValueHistory(const MachineFunction *MF,
const TargetRegisterInfo *TRI,
DbgValueHistoryMap &Result);
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DebugLocEntry.h b/lib/CodeGen/AsmPrinter/DebugLocEntry.h
index 6a943c64ea22..083228b8fd41 100644
--- a/lib/CodeGen/AsmPrinter/DebugLocEntry.h
+++ b/lib/CodeGen/AsmPrinter/DebugLocEntry.h
@@ -175,6 +175,6 @@ inline bool operator<(const DebugLocEntry::Value &A,
B.getExpression()->getBitPieceOffset();
}
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DebugLocStream.h b/lib/CodeGen/AsmPrinter/DebugLocStream.h
index 3001da21b907..1ae385db4a55 100644
--- a/lib/CodeGen/AsmPrinter/DebugLocStream.h
+++ b/lib/CodeGen/AsmPrinter/DebugLocStream.h
@@ -129,5 +129,5 @@ private:
return Entries[EI + 1].CommentOffset - Entries[EI].CommentOffset;
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfAccelTable.h b/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
index 4d81441f6a72..cc677c260071 100644
--- a/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
+++ b/lib/CodeGen/AsmPrinter/DwarfAccelTable.h
@@ -252,5 +252,5 @@ public:
void dump() { print(dbgs()); }
#endif
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index 689184a651ed..45c56fbb4463 100644
--- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -817,4 +817,4 @@ bool DwarfCompileUnit::includeMinimalInlineScopes() const {
return getCUNode()->getEmissionKind() == DIBuilder::LineTablesOnly ||
(DD->useSplitDwarf() && !Skeleton);
}
-} // end llvm namespace
+} // namespace llvm
diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
index 50e4a54eb3e0..48c302bf9c18 100644
--- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
+++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
@@ -231,6 +231,6 @@ public:
const MCSymbol *getBaseAddress() const { return BaseAddress; }
};
-} // end llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 3f6665bd5768..fb3316985b86 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1414,7 +1414,7 @@ void DwarfDebug::emitDebugPubSection(
Asm->EmitInt16(dwarf::DW_PUBNAMES_VERSION);
Asm->OutStreamer->AddComment("Offset of Compilation Unit Info");
- Asm->emitSectionOffset(TheU->getLabelBegin());
+ Asm->emitDwarfSymbolReference(TheU->getLabelBegin());
Asm->OutStreamer->AddComment("Compilation Unit Length");
Asm->EmitInt32(TheU->getLength());
@@ -1562,8 +1562,6 @@ void DwarfDebug::emitDebugLoc() {
Asm->OutStreamer->EmitLabel(List.Label);
const DwarfCompileUnit *CU = List.CU;
for (const auto &Entry : DebugLocs.getEntries(List)) {
- if (Entry.BeginSym == Entry.EndSym)
- continue;
// Set up the range. This range is relative to the entry point of the
// compile unit. This is a hard coded 0 for low_pc when we're emitting
// ranges, or the DW_AT_low_pc on the compile unit otherwise.
@@ -1741,7 +1739,7 @@ void DwarfDebug::emitDebugARanges() {
Asm->OutStreamer->AddComment("DWARF Arange version number");
Asm->EmitInt16(dwarf::DW_ARANGES_VERSION);
Asm->OutStreamer->AddComment("Offset Into Debug Info Section");
- Asm->emitSectionOffset(CU->getLabelBegin());
+ Asm->emitDwarfSymbolReference(CU->getLabelBegin());
Asm->OutStreamer->AddComment("Address Size (in bytes)");
Asm->EmitInt8(PtrSize);
Asm->OutStreamer->AddComment("Segment Size (in bytes)");
diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
index d56982712d53..a2799b8d6300 100644
--- a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
@@ -65,11 +65,6 @@ void DwarfExpression::AddShr(unsigned ShiftBy) {
EmitOp(dwarf::DW_OP_shr);
}
-void DwarfExpression::AddOpStackValue() {
- if (DwarfVersion >= 4)
- EmitOp(dwarf::DW_OP_stack_value);
-}
-
bool DwarfExpression::AddMachineRegIndirect(unsigned MachineReg, int Offset) {
if (isFrameRegister(MachineReg)) {
// If variable offset is based in frame register then use fbreg.
@@ -177,14 +172,16 @@ void DwarfExpression::AddSignedConstant(int Value) {
// value, so the producers and consumers started to rely on heuristics
// to disambiguate the value vs. location status of the expression.
// See PR21176 for more details.
- AddOpStackValue();
+ if (DwarfVersion >= 4)
+ EmitOp(dwarf::DW_OP_stack_value);
}
void DwarfExpression::AddUnsignedConstant(unsigned Value) {
EmitOp(dwarf::DW_OP_constu);
EmitUnsigned(Value);
// cf. comment in DwarfExpression::AddSignedConstant().
- AddOpStackValue();
+ if (DwarfVersion >= 4)
+ EmitOp(dwarf::DW_OP_stack_value);
}
static unsigned getOffsetOrZero(unsigned OffsetInBits,
@@ -215,30 +212,15 @@ bool DwarfExpression::AddMachineRegExpression(const DIExpression *Expr,
getOffsetOrZero(OffsetInBits, PieceOffsetInBits));
}
case dwarf::DW_OP_plus: {
+ // [DW_OP_reg,Offset,DW_OP_plus,DW_OP_deref] --> [DW_OP_breg,Offset].
auto N = I.getNext();
- unsigned Offset = I->getArg(0);
- // First combine all DW_OP_plus until we hit either a DW_OP_deref or a
- // DW_OP_bit_piece
- while (N != E && N->getOp() == dwarf::DW_OP_plus) {
- Offset += N->getArg(0);
- ++I;
- N = I.getNext();
- }
if (N != E && N->getOp() == dwarf::DW_OP_deref) {
- // [DW_OP_reg,Offset,DW_OP_plus,DW_OP_deref] --> [DW_OP_breg,Offset].
+ unsigned Offset = I->getArg(0);
ValidReg = AddMachineRegIndirect(MachineReg, Offset);
std::advance(I, 2);
- } else {
- assert ((N == E) || (N->getOp() == dwarf::DW_OP_bit_piece));
- if (Offset == 0) {
- ValidReg = AddMachineRegPiece(MachineReg);
- } else {
- ValidReg = AddMachineRegIndirect(MachineReg, Offset);
- AddOpStackValue();
- }
- ++I;
- }
- break;
+ break;
+ } else
+ ValidReg = AddMachineRegPiece(MachineReg);
}
case dwarf::DW_OP_deref: {
// [DW_OP_reg,DW_OP_deref] --> [DW_OP_breg].
@@ -255,7 +237,6 @@ bool DwarfExpression::AddMachineRegExpression(const DIExpression *Expr,
// Emit remaining elements of the expression.
AddExpression(I, E, PieceOffsetInBits);
-
return true;
}
diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.h b/lib/CodeGen/AsmPrinter/DwarfExpression.h
index f6249fff4253..154d7d9b9645 100644
--- a/lib/CodeGen/AsmPrinter/DwarfExpression.h
+++ b/lib/CodeGen/AsmPrinter/DwarfExpression.h
@@ -83,9 +83,6 @@ public:
bool AddMachineRegPiece(unsigned MachineReg, unsigned PieceSizeInBits = 0,
unsigned PieceOffsetInBits = 0);
- /// Emit a DW_OP_stack_value
- void AddOpStackValue();
-
/// Emit a signed constant.
void AddSignedConstant(int Value);
/// Emit an unsigned constant.
@@ -134,6 +131,6 @@ public:
void EmitUnsigned(uint64_t Value) override;
bool isFrameRegister(unsigned MachineReg) override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfFile.cpp b/lib/CodeGen/AsmPrinter/DwarfFile.cpp
index 5ef333c4cf44..fdefb1df84b6 100644
--- a/lib/CodeGen/AsmPrinter/DwarfFile.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfFile.cpp
@@ -170,4 +170,4 @@ bool DwarfFile::addScopeVariable(LexicalScope *LS, DbgVariable *Var) {
Vars.push_back(Var);
return true;
}
-}
+} // namespace llvm
diff --git a/lib/CodeGen/AsmPrinter/DwarfFile.h b/lib/CodeGen/AsmPrinter/DwarfFile.h
index 8402027edd6f..22759fdecccf 100644
--- a/lib/CodeGen/AsmPrinter/DwarfFile.h
+++ b/lib/CodeGen/AsmPrinter/DwarfFile.h
@@ -114,5 +114,5 @@ public:
return DITypeNodeToDieMap.lookup(TypeMD);
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfStringPool.h b/lib/CodeGen/AsmPrinter/DwarfStringPool.h
index 93a168485a54..c10725815351 100644
--- a/lib/CodeGen/AsmPrinter/DwarfStringPool.h
+++ b/lib/CodeGen/AsmPrinter/DwarfStringPool.h
@@ -45,5 +45,5 @@ public:
/// Get a reference to an entry in the string pool.
EntryRef getEntry(AsmPrinter &Asm, StringRef Str);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index 907f6706bc6a..f4b15ba053e9 100644
--- a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -931,7 +931,7 @@ void DwarfUnit::constructTypeDIE(DIE &Buffer, const DICompositeType *CTy) {
StringRef PropertyName = Property->getName();
addString(ElemDie, dwarf::DW_AT_APPLE_property_name, PropertyName);
if (Property->getType())
- addType(ElemDie, Property->getType());
+ addType(ElemDie, resolve(Property->getType()));
addSourceLine(ElemDie, Property);
StringRef GetterName = Property->getGetterName();
if (!GetterName.empty())
@@ -1449,10 +1449,8 @@ void DwarfUnit::emitHeader(bool UseOffsets) {
// start of the section. Use a relocatable offset where needed to ensure
// linking doesn't invalidate that offset.
const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
- if (!UseOffsets)
- Asm->emitSectionOffset(TLOF.getDwarfAbbrevSection()->getBeginSymbol());
- else
- Asm->EmitInt32(0);
+ Asm->emitDwarfSymbolReference(TLOF.getDwarfAbbrevSection()->getBeginSymbol(),
+ UseOffsets);
Asm->OutStreamer->AddComment("Address Size (in bytes)");
Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.h b/lib/CodeGen/AsmPrinter/DwarfUnit.h
index f56c9b4eb13e..200ddf0f3cbe 100644
--- a/lib/CodeGen/AsmPrinter/DwarfUnit.h
+++ b/lib/CodeGen/AsmPrinter/DwarfUnit.h
@@ -402,5 +402,5 @@ public:
}
DwarfCompileUnit &getCU() override { return CU; }
};
-} // end llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/EHStreamer.h b/lib/CodeGen/AsmPrinter/EHStreamer.h
index 65973fab6b21..128a8ad39255 100644
--- a/lib/CodeGen/AsmPrinter/EHStreamer.h
+++ b/lib/CodeGen/AsmPrinter/EHStreamer.h
@@ -132,7 +132,7 @@ public:
void beginInstruction(const MachineInstr *MI) override {}
void endInstruction() override {}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp b/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
index 535b1f605853..11bfe767a27b 100644
--- a/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
+++ b/lib/CodeGen/AsmPrinter/WinCodeViewLineTables.cpp
@@ -378,4 +378,4 @@ void WinCodeViewLineTables::beginInstruction(const MachineInstr *MI) {
return;
maybeRecordLocation(DL, Asm->MF);
}
-}
+} // namespace llvm
diff --git a/lib/CodeGen/AsmPrinter/WinException.cpp b/lib/CodeGen/AsmPrinter/WinException.cpp
index f1663503c08e..1ba6060a89f6 100644
--- a/lib/CodeGen/AsmPrinter/WinException.cpp
+++ b/lib/CodeGen/AsmPrinter/WinException.cpp
@@ -50,6 +50,11 @@ WinException::~WinException() {}
/// endModule - Emit all exception information that should come after the
/// content.
void WinException::endModule() {
+ auto &OS = *Asm->OutStreamer;
+ const Module *M = MMI->getModule();
+ for (const Function &F : *M)
+ if (F.hasFnAttribute("safeseh"))
+ OS.EmitCOFFSafeSEH(Asm->getSymbol(&F));
}
void WinException::beginFunction(const MachineFunction *MF) {
@@ -144,7 +149,7 @@ void WinException::endFunction(const MachineFunction *MF) {
if (Per == EHPersonality::MSVC_Win64SEH)
emitCSpecificHandlerTable();
else if (Per == EHPersonality::MSVC_X86SEH)
- emitCSpecificHandlerTable(); // FIXME
+ emitExceptHandlerTable(MF);
else if (Per == EHPersonality::MSVC_CXX)
emitCXXFrameHandler3Table(MF);
else
@@ -444,7 +449,7 @@ void WinException::emitCXXFrameHandler3Table(const MachineFunction *MF) {
Asm->OutContext.getOrCreateParentFrameOffsetSymbol(
GlobalValue::getRealLinkageName(HT.Handler->getName()));
const MCSymbolRefExpr *ParentFrameOffsetRef = MCSymbolRefExpr::create(
- ParentFrameOffset, MCSymbolRefExpr::VK_None, Asm->OutContext);
+ ParentFrameOffset, Asm->OutContext);
OS.EmitValue(ParentFrameOffsetRef, 4); // ParentFrameOffset
}
}
@@ -541,3 +546,103 @@ void WinException::extendIP2StateTable(const MachineFunction *MF,
}
}
}
+
+/// Emit the language-specific data that _except_handler3 and 4 expect. This is
+/// functionally equivalent to the __C_specific_handler table, except it is
+/// indexed by state number instead of IP.
+void WinException::emitExceptHandlerTable(const MachineFunction *MF) {
+ MCStreamer &OS = *Asm->OutStreamer;
+
+ // Define the EH registration node offset label in terms of its frameescape
+ // label. The WinEHStatePass ensures that the registration node is passed to
+ // frameescape. This allows SEH filter functions to access the
+ // EXCEPTION_POINTERS field, which is filled in by the _except_handlerN.
+ const Function *F = MF->getFunction();
+ WinEHFuncInfo &FuncInfo = MMI->getWinEHFuncInfo(F);
+ assert(FuncInfo.EHRegNodeEscapeIndex != INT_MAX &&
+ "no EH reg node frameescape index");
+ StringRef FLinkageName = GlobalValue::getRealLinkageName(F->getName());
+ MCSymbol *ParentFrameOffset =
+ Asm->OutContext.getOrCreateParentFrameOffsetSymbol(FLinkageName);
+ MCSymbol *FrameAllocSym = Asm->OutContext.getOrCreateFrameAllocSymbol(
+ FLinkageName, FuncInfo.EHRegNodeEscapeIndex);
+ const MCSymbolRefExpr *FrameAllocSymRef =
+ MCSymbolRefExpr::create(FrameAllocSym, Asm->OutContext);
+ OS.EmitAssignment(ParentFrameOffset, FrameAllocSymRef);
+
+ // Emit the __ehtable label that we use for llvm.x86.seh.lsda.
+ MCSymbol *LSDALabel = Asm->OutContext.getOrCreateLSDASymbol(FLinkageName);
+ OS.EmitLabel(LSDALabel);
+
+ const Function *Per = MMI->getPersonality();
+ StringRef PerName = Per->getName();
+ int BaseState = -1;
+ if (PerName == "_except_handler4") {
+ // The LSDA for _except_handler4 starts with this struct, followed by the
+ // scope table:
+ //
+ // struct EH4ScopeTable {
+ // int32_t GSCookieOffset;
+ // int32_t GSCookieXOROffset;
+ // int32_t EHCookieOffset;
+ // int32_t EHCookieXOROffset;
+ // ScopeTableEntry ScopeRecord[];
+ // };
+ //
+ // Only the EHCookieOffset field appears to vary, and it appears to be the
+ // offset from the final saved SP value to the retaddr.
+ OS.EmitIntValue(-2, 4);
+ OS.EmitIntValue(0, 4);
+ // FIXME: Calculate.
+ OS.EmitIntValue(9999, 4);
+ OS.EmitIntValue(0, 4);
+ BaseState = -2;
+ }
+
+ // Build a list of pointers to LandingPadInfos and then sort by WinEHState.
+ const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
+ SmallVector<const LandingPadInfo *, 4> LPads;
+ LPads.reserve(PadInfos.size());
+ for (const LandingPadInfo &LPInfo : PadInfos)
+ LPads.push_back(&LPInfo);
+ std::sort(LPads.begin(), LPads.end(),
+ [](const LandingPadInfo *L, const LandingPadInfo *R) {
+ return L->WinEHState < R->WinEHState;
+ });
+
+ // For each action in each lpad, emit one of these:
+ // struct ScopeTableEntry {
+ // int32_t EnclosingLevel;
+ // int32_t (__cdecl *Filter)();
+ // void *HandlerOrFinally;
+ // };
+ //
+ // The "outermost" action will use BaseState as its enclosing level. Each
+ // other action will refer to the previous state as its enclosing level.
+ int CurState = 0;
+ for (const LandingPadInfo *LPInfo : LPads) {
+ int EnclosingLevel = BaseState;
+ assert(CurState + int(LPInfo->SEHHandlers.size()) - 1 ==
+ LPInfo->WinEHState &&
+ "gaps in the SEH scope table");
+ for (auto I = LPInfo->SEHHandlers.rbegin(), E = LPInfo->SEHHandlers.rend();
+ I != E; ++I) {
+ const SEHHandler &Handler = *I;
+ const BlockAddress *BA = Handler.RecoverBA;
+ const Function *F = Handler.FilterOrFinally;
+ assert(F && "cannot catch all in 32-bit SEH without filter function");
+ const MCExpr *FilterOrNull =
+ create32bitRef(BA ? Asm->getSymbol(F) : nullptr);
+ const MCExpr *ExceptOrFinally = create32bitRef(
+ BA ? Asm->GetBlockAddressSymbol(BA) : Asm->getSymbol(F));
+
+ OS.EmitIntValue(EnclosingLevel, 4);
+ OS.EmitValue(FilterOrNull, 4);
+ OS.EmitValue(ExceptOrFinally, 4);
+
+ // The next state unwinds to this state.
+ EnclosingLevel = CurState;
+ CurState++;
+ }
+ }
+}
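The EnclosingLevel chaining in the loop above is easiest to see on a worked case: with the _except_handler4 base state of -2 and one landing pad carrying three handlers, the records come out as state 0 -> -2, state 1 -> 0, state 2 -> 1, each state unwinding to the previously emitted one. A toy reproduction of just the numbering:

    #include <cstdio>

    int main() {
      const int BaseState = -2; // _except_handler4 case from the hunk above
      int CurState = 0, EnclosingLevel = BaseState;
      for (int Handler = 0; Handler < 3; ++Handler) { // one lpad, 3 handlers
        std::printf("state %d unwinds to %d\n", CurState, EnclosingLevel);
        EnclosingLevel = CurState; // the next state unwinds to this state
        ++CurState;
      }
    }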
diff --git a/lib/CodeGen/AsmPrinter/WinException.h b/lib/CodeGen/AsmPrinter/WinException.h
index 478899b79da9..bbff3c24cffc 100644
--- a/lib/CodeGen/AsmPrinter/WinException.h
+++ b/lib/CodeGen/AsmPrinter/WinException.h
@@ -38,8 +38,15 @@ class WinException : public EHStreamer {
void emitCSpecificHandlerTable();
+ /// Emit the EH table data for 32-bit and 64-bit functions using
+ /// the __CxxFrameHandler3 personality.
void emitCXXFrameHandler3Table(const MachineFunction *MF);
+ /// Emit the EH table data for _except_handler3 and _except_handler4
+ /// personality functions. These are only used on 32-bit and do not use CFI
+ /// tables.
+ void emitExceptHandlerTable(const MachineFunction *MF);
+
void extendIP2StateTable(const MachineFunction *MF, const Function *ParentF,
WinEHFuncInfo &FuncInfo);
@@ -63,7 +70,7 @@ public:
/// Gather and emit post-function exception information.
void endFunction(const MachineFunction *) override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp
index fa17108b2a8e..0bb0fa34e314 100644
--- a/lib/CodeGen/AtomicExpandPass.cpp
+++ b/lib/CodeGen/AtomicExpandPass.cpp
@@ -55,7 +55,7 @@ namespace {
bool isIdempotentRMW(AtomicRMWInst *AI);
bool simplifyIdempotentRMW(AtomicRMWInst *AI);
};
-}
+} // namespace
char AtomicExpand::ID = 0;
char &llvm::AtomicExpandID = AtomicExpand::ID;
@@ -464,7 +464,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
Value *ShouldStore =
Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");
- // If the the cmpxchg doesn't actually need any ordering when it fails, we can
+ // If the cmpxchg doesn't actually need any ordering when it fails, we can
// jump straight past that fence instruction (if it exists).
Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);
diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp
index b8d9a1a29edc..e7b7f5b939e3 100644
--- a/lib/CodeGen/BranchFolding.cpp
+++ b/lib/CodeGen/BranchFolding.cpp
@@ -79,7 +79,7 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
char BranchFolderPass::ID = 0;
char &llvm::BranchFolderPassID = BranchFolderPass::ID;
@@ -273,8 +273,12 @@ static unsigned HashMachineInstr(const MachineInstr *MI) {
// Merge in bits from the operand if easy.
unsigned OperandHash = 0;
switch (Op.getType()) {
- case MachineOperand::MO_Register: OperandHash = Op.getReg(); break;
- case MachineOperand::MO_Immediate: OperandHash = Op.getImm(); break;
+ case MachineOperand::MO_Register:
+ OperandHash = Op.getReg();
+ break;
+ case MachineOperand::MO_Immediate:
+ OperandHash = Op.getImm();
+ break;
case MachineOperand::MO_MachineBasicBlock:
OperandHash = Op.getMBB()->getNumber();
break;
@@ -289,10 +293,11 @@ static unsigned HashMachineInstr(const MachineInstr *MI) {
// pull in the offset.
OperandHash = Op.getOffset();
break;
- default: break;
+ default:
+ break;
}
- Hash += ((OperandHash << 3) | Op.getType()) << (i&31);
+ Hash += ((OperandHash << 3) | Op.getType()) << (i & 31);
}
return Hash;
}
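The reformatted hunk leaves the mixing step intact: each operand's hash is tagged with its operand type in the low bits, then shifted by the operand index (mod 32) so reordered operands produce different sums. Isolated below, with illustrative type tags standing in for the MachineOperand enum values:

    #include <cstdio>

    // One mixing step of HashMachineInstr; OperandHash and Type stand for
    // whatever the switch above extracted for operand i.
    static unsigned mixOperand(unsigned Hash, unsigned OperandHash,
                               unsigned Type, unsigned i) {
      return Hash + (((OperandHash << 3) | Type) << (i & 31));
    }

    int main() {
      unsigned H = 0;
      H = mixOperand(H, /*reg id*/ 42, /*register tag*/ 0, 0);
      H = mixOperand(H, /*imm*/ 7, /*immediate tag*/ 1, 1);
      std::printf("0x%x\n", H);
    }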
@@ -301,13 +306,13 @@ static unsigned HashMachineInstr(const MachineInstr *MI) {
static unsigned HashEndOfMBB(const MachineBasicBlock *MBB) {
MachineBasicBlock::const_iterator I = MBB->end();
if (I == MBB->begin())
- return 0; // Empty MBB.
+ return 0; // Empty MBB.
--I;
// Skip debug info so it will not affect codegen.
while (I->isDebugValue()) {
- if (I==MBB->begin())
- return 0; // MBB empty except for debug info.
+ if (I == MBB->begin())
+ return 0; // MBB empty except for debug info.
--I;
}
diff --git a/lib/CodeGen/BranchFolding.h b/lib/CodeGen/BranchFolding.h
index 3653a2ccd623..d1b17dd31aab 100644
--- a/lib/CodeGen/BranchFolding.h
+++ b/lib/CodeGen/BranchFolding.h
@@ -142,6 +142,6 @@ namespace llvm {
bool HoistCommonCode(MachineFunction &MF);
bool HoistCommonCodeInSuccs(MachineBasicBlock *MBB);
};
-}
+} // namespace llvm
#endif /* LLVM_CODEGEN_BRANCHFOLDING_HPP */
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 6d2af9003509..a992c5e00b21 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -20,12 +20,14 @@ add_llvm_library(LLVMCodeGen
ExecutionDepsFix.cpp
ExpandISelPseudos.cpp
ExpandPostRAPseudos.cpp
+ FaultMaps.cpp
GCMetadata.cpp
GCMetadataPrinter.cpp
GCRootLowering.cpp
GCStrategy.cpp
GlobalMerge.cpp
IfConversion.cpp
+ ImplicitNullChecks.cpp
InlineSpiller.cpp
InterferenceCache.cpp
IntrinsicLowering.cpp
@@ -71,6 +73,7 @@ add_llvm_library(LLVMCodeGen
MachineSink.cpp
MachineTraceMetrics.cpp
MachineVerifier.cpp
+ MIRPrinter.cpp
MIRPrintingPass.cpp
OcamlGC.cpp
OptimizePHIs.cpp
diff --git a/lib/CodeGen/CallingConvLower.cpp b/lib/CodeGen/CallingConvLower.cpp
index 034ffb34b9cc..fb29b1db7a43 100644
--- a/lib/CodeGen/CallingConvLower.cpp
+++ b/lib/CodeGen/CallingConvLower.cpp
@@ -37,9 +37,9 @@ CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
UsedRegs.resize((TRI.getNumRegs()+31)/32);
}
-// HandleByVal - Allocate space on the stack large enough to pass an argument
-// by value. The size and alignment information of the argument is encoded in
-// its parameter attribute.
+/// Allocate space on the stack large enough to pass an argument by value.
+/// The size and alignment information of the argument is encoded in
+/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
int MinSize, int MinAlign,
@@ -57,13 +57,13 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}
-/// MarkAllocated - Mark a register and all of its aliases as allocated.
+/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(unsigned Reg) {
for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
UsedRegs[*AI/32] |= 1 << (*AI&31);
}
-/// AnalyzeFormalArguments - Analyze an array of argument values,
+/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -83,8 +83,8 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
}
}
-/// CheckReturn - Analyze the return values of a function, returning true if
-/// the return can be performed without sret-demotion, and false otherwise.
+/// Analyze the return values of a function, returning true if the return can
+/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
// Determine which register each value should be copied into.
@@ -97,7 +97,7 @@ bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
return true;
}
-/// AnalyzeReturn - Analyze the returned values of a return,
+/// Analyze the return values of a return instruction,
/// incorporating info about the result values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
@@ -115,7 +115,7 @@ void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
}
}
-/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
+/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
@@ -133,8 +133,7 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
}
}
-/// AnalyzeCallOperands - Same as above except it takes vectors of types
-/// and argument flags.
+/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
CCAssignFn Fn) {
@@ -152,8 +151,8 @@ void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
}
}
-/// AnalyzeCallResult - Analyze the return values of a call,
-/// incorporating info about the passed values into this state.
+/// Analyze the return values of a call, incorporating info about the passed
+/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn) {
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
@@ -169,8 +168,7 @@ void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
}
}
-/// AnalyzeCallResult - Same as above except it's specialized for calls which
-/// produce a single value.
+/// Same as above except it's specialized for calls that produce a single value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
diff --git a/lib/CodeGen/CodeGen.cpp b/lib/CodeGen/CodeGen.cpp
index 2c6eaf35a257..155c5ecec772 100644
--- a/lib/CodeGen/CodeGen.cpp
+++ b/lib/CodeGen/CodeGen.cpp
@@ -42,6 +42,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeMachineBlockPlacementPass(Registry);
initializeMachineBlockPlacementStatsPass(Registry);
initializeMachineCSEPass(Registry);
+ initializeImplicitNullChecksPass(Registry);
initializeMachineCombinerPass(Registry);
initializeMachineCopyPropagationPass(Registry);
initializeMachineDominatorTreePass(Registry);
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index 6a814038c688..247c45bd4366 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -135,8 +135,8 @@ class TypePromotionTransaction;
/// multiple load/stores of the same address.
ValueMap<Value*, Value*> SunkAddrs;
- /// Keeps track of all truncates inserted for the current function.
- SetOfInstrs InsertedTruncsSet;
+ /// Keeps track of all instructions inserted for the current function.
+ SetOfInstrs InsertedInsts;
/// Keeps track of the type of the related instruction before their
/// promotion for the current function.
InstrToOrigTy PromotedInsts;
@@ -189,7 +189,7 @@ class TypePromotionTransaction;
bool splitBranchCondition(Function &F);
bool simplifyOffsetableRelocate(Instruction &I);
};
-}
+} // namespace
char CodeGenPrepare::ID = 0;
INITIALIZE_TM_PASS(CodeGenPrepare, "codegenprepare",
@@ -205,7 +205,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
bool EverMadeChange = false;
// Clear per function information.
- InsertedTruncsSet.clear();
+ InsertedInsts.clear();
PromotedInsts.clear();
ModifiedDT = false;
@@ -1406,6 +1406,9 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) {
return false;
// Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
ExtVal->moveBefore(CI);
+ // Mark this instruction as "inserted by CGP", so that other
+ // optimizations don't touch it.
+ InsertedInsts.insert(ExtVal);
return true;
}
}
@@ -2107,8 +2110,8 @@ class AddressingModeMatcher {
/// part of the return value of this addressing mode matching stuff.
ExtAddrMode &AddrMode;
- /// The truncate instruction inserted by other CodeGenPrepare optimizations.
- const SetOfInstrs &InsertedTruncs;
+ /// The instructions inserted by other CodeGenPrepare optimizations.
+ const SetOfInstrs &InsertedInsts;
/// A map from the instructions to their type before promotion.
InstrToOrigTy &PromotedInsts;
/// The ongoing transaction where every action should be registered.
@@ -2122,14 +2125,14 @@ class AddressingModeMatcher {
AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
const TargetMachine &TM, Type *AT, unsigned AS,
Instruction *MI, ExtAddrMode &AM,
- const SetOfInstrs &InsertedTruncs,
+ const SetOfInstrs &InsertedInsts,
InstrToOrigTy &PromotedInsts,
TypePromotionTransaction &TPT)
: AddrModeInsts(AMI), TM(TM),
TLI(*TM.getSubtargetImpl(*MI->getParent()->getParent())
->getTargetLowering()),
AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
- InsertedTruncs(InsertedTruncs), PromotedInsts(PromotedInsts), TPT(TPT) {
+ InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT) {
IgnoreProfitability = false;
}
public:
@@ -2137,8 +2140,7 @@ public:
/// Match - Find the maximal addressing mode that a load/store of V can fold,
/// given an access type of AccessTy. This returns a list of involved
/// instructions in AddrModeInsts.
- /// \p InsertedTruncs The truncate instruction inserted by other
- /// CodeGenPrepare
+ /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
/// optimizations.
/// \p PromotedInsts maps the instructions to their type before promotion.
/// \p TPT The ongoing transaction where every action should be registered.
@@ -2146,13 +2148,13 @@ public:
Instruction *MemoryInst,
SmallVectorImpl<Instruction*> &AddrModeInsts,
const TargetMachine &TM,
- const SetOfInstrs &InsertedTruncs,
+ const SetOfInstrs &InsertedInsts,
InstrToOrigTy &PromotedInsts,
TypePromotionTransaction &TPT) {
ExtAddrMode Result;
bool Success = AddressingModeMatcher(AddrModeInsts, TM, AccessTy, AS,
- MemoryInst, Result, InsertedTruncs,
+ MemoryInst, Result, InsertedInsts,
PromotedInsts, TPT).MatchAddr(V, 0);
(void)Success; assert(Success && "Couldn't select *anything*?");
return Result;
@@ -2361,12 +2363,12 @@ public:
/// action to promote the operand of \p Ext instead of using Ext.
/// \return NULL if no promotable action is possible with the current
/// sign extension.
- /// \p InsertedTruncs keeps track of all the truncate instructions inserted by
- /// the others CodeGenPrepare optimizations. This information is important
+ /// \p InsertedInsts keeps track of all the instructions inserted by the
+ /// other CodeGenPrepare optimizations. This information is important
/// because we do not want to promote these instructions as CodeGenPrepare
/// will reinsert them later, thus creating an infinite create/remove loop.
/// \p PromotedInsts maps the instructions to their type before promotion.
- static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedTruncs,
+ static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
const TargetLowering &TLI,
const InstrToOrigTy &PromotedInsts);
};
@@ -2439,7 +2441,7 @@ bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
}
TypePromotionHelper::Action TypePromotionHelper::getAction(
- Instruction *Ext, const SetOfInstrs &InsertedTruncs,
+ Instruction *Ext, const SetOfInstrs &InsertedInsts,
const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
"Unexpected instruction type");
@@ -2455,7 +2457,7 @@ TypePromotionHelper::Action TypePromotionHelper::getAction(
// Do not promote if the operand has been added by codegenprepare.
// Otherwise, it means we are undoing an optimization that is likely to be
// redone, thus causing potential infinite loop.
- if (isa<TruncInst>(ExtOpnd) && InsertedTruncs.count(ExtOpnd))
+ if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
return nullptr;
// SExt or Trunc instructions.
@@ -2839,7 +2841,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
// Try to move this ext out of the way of the addressing mode.
// Ask for a method for doing so.
TypePromotionHelper::Action TPH =
- TypePromotionHelper::getAction(Ext, InsertedTruncs, TLI, PromotedInsts);
+ TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
if (!TPH)
return false;
@@ -3157,7 +3159,7 @@ IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
TypePromotionTransaction::ConstRestorationPt LastKnownGood =
TPT.getRestorationPoint();
AddressingModeMatcher Matcher(MatchedAddrModeInsts, TM, AddressAccessTy, AS,
- MemoryInst, Result, InsertedTruncs,
+ MemoryInst, Result, InsertedInsts,
PromotedInsts, TPT);
Matcher.IgnoreProfitability = true;
bool Success = Matcher.MatchAddr(Address, 0);
@@ -3240,7 +3242,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
SmallVector<Instruction*, 16> NewAddrModeInsts;
ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TM,
- InsertedTruncsSet, PromotedInsts, TPT);
+ InsertedInsts, PromotedInsts, TPT);
// This check is broken into two cases with very similar code to avoid using
// getNumUses() as much as possible. Some values have a lot of uses, so
@@ -3652,7 +3654,7 @@ bool CodeGenPrepare::ExtLdPromotion(TypePromotionTransaction &TPT,
continue;
// Get the action to perform the promotion.
TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
- I, InsertedTruncsSet, *TLI, PromotedInsts);
+ I, InsertedInsts, *TLI, PromotedInsts);
// Check if we can promote.
if (!TPH)
continue;
@@ -3828,7 +3830,7 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
if (!InsertedTrunc) {
BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
- InsertedTruncsSet.insert(InsertedTrunc);
+ InsertedInsts.insert(InsertedTrunc);
}
// Replace a use of the {s|z}ext source with a use of the result.
@@ -4357,6 +4359,11 @@ bool CodeGenPrepare::OptimizeExtractElementInst(Instruction *Inst) {
}
bool CodeGenPrepare::OptimizeInst(Instruction *I, bool& ModifiedDT) {
+ // Bail out if we inserted this instruction ourselves, so that the
+ // optimizations don't step on each other's toes.
+ if (InsertedInsts.count(I))
+ return false;
+
if (PHINode *P = dyn_cast<PHINode>(I)) {
// It is possible for very late stage optimizations (such as SimplifyCFG)
// to introduce PHI nodes too late to be cleaned up. If we detect such a
diff --git a/lib/CodeGen/CoreCLRGC.cpp b/lib/CodeGen/CoreCLRGC.cpp
index 28c97ba71bd9..0816d1488c28 100644
--- a/lib/CodeGen/CoreCLRGC.cpp
+++ b/lib/CodeGen/CoreCLRGC.cpp
@@ -45,7 +45,7 @@ public:
return (1 == PT->getAddressSpace());
}
};
-}
+} // namespace
static GCRegistry::Add<CoreCLRGC> X("coreclr", "CoreCLR-compatible GC");
diff --git a/lib/CodeGen/CriticalAntiDepBreaker.h b/lib/CodeGen/CriticalAntiDepBreaker.h
index af011a0a65f6..1ca530087c44 100644
--- a/lib/CodeGen/CriticalAntiDepBreaker.h
+++ b/lib/CodeGen/CriticalAntiDepBreaker.h
@@ -103,6 +103,6 @@ class TargetRegisterInfo;
const TargetRegisterClass *RC,
SmallVectorImpl<unsigned> &Forbid);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/DFAPacketizer.cpp b/lib/CodeGen/DFAPacketizer.cpp
index 0a188c0935ad..02cdb5086de2 100644
--- a/lib/CodeGen/DFAPacketizer.cpp
+++ b/lib/CodeGen/DFAPacketizer.cpp
@@ -110,7 +110,7 @@ public:
// Schedule - Actual scheduling work.
void schedule() override;
};
-}
+} // namespace llvm
DefaultVLIWScheduler::DefaultVLIWScheduler(MachineFunction &MF,
MachineLoopInfo &MLI, bool IsPostRA)
diff --git a/lib/CodeGen/DeadMachineInstructionElim.cpp b/lib/CodeGen/DeadMachineInstructionElim.cpp
index 963d573ea7f0..efaf47c40c82 100644
--- a/lib/CodeGen/DeadMachineInstructionElim.cpp
+++ b/lib/CodeGen/DeadMachineInstructionElim.cpp
@@ -45,7 +45,7 @@ namespace {
private:
bool isDead(const MachineInstr *MI) const;
};
-}
+} // namespace
char DeadMachineInstructionElim::ID = 0;
char &llvm::DeadMachineInstructionElimID = DeadMachineInstructionElim::ID;
diff --git a/lib/CodeGen/DwarfEHPrepare.cpp b/lib/CodeGen/DwarfEHPrepare.cpp
index 42656fb08db1..e019dfbc8f7d 100644
--- a/lib/CodeGen/DwarfEHPrepare.cpp
+++ b/lib/CodeGen/DwarfEHPrepare.cpp
@@ -181,27 +181,22 @@ size_t DwarfEHPrepare::pruneUnreachableResumes(
bool DwarfEHPrepare::InsertUnwindResumeCalls(Function &Fn) {
SmallVector<ResumeInst*, 16> Resumes;
SmallVector<LandingPadInst*, 16> CleanupLPads;
- bool FoundLP = false;
for (BasicBlock &BB : Fn) {
if (auto *RI = dyn_cast<ResumeInst>(BB.getTerminator()))
Resumes.push_back(RI);
- if (auto *LP = BB.getLandingPadInst()) {
+ if (auto *LP = BB.getLandingPadInst())
if (LP->isCleanup())
CleanupLPads.push_back(LP);
- // Check the personality on the first landingpad. Don't do anything if
- // it's for MSVC.
- if (!FoundLP) {
- FoundLP = true;
- EHPersonality Pers = classifyEHPersonality(LP->getPersonalityFn());
- if (isMSVCEHPersonality(Pers))
- return false;
- }
- }
}
if (Resumes.empty())
return false;
+ // Check the personality; don't do anything if it's for MSVC.
+ EHPersonality Pers = classifyEHPersonality(Fn.getPersonalityFn());
+ if (isMSVCEHPersonality(Pers))
+ return false;
+
LLVMContext &Ctx = Fn.getContext();
size_t ResumesLeft = pruneUnreachableResumes(Fn, Resumes, CleanupLPads);
diff --git a/lib/CodeGen/EarlyIfConversion.cpp b/lib/CodeGen/EarlyIfConversion.cpp
index d3687b98b344..fbc4d97c4987 100644
--- a/lib/CodeGen/EarlyIfConversion.cpp
+++ b/lib/CodeGen/EarlyIfConversion.cpp
@@ -479,11 +479,20 @@ void SSAIfConv::rewritePHIOperands() {
// Convert all PHIs to select instructions inserted before FirstTerm.
for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
PHIInfo &PI = PHIs[i];
+ unsigned DstReg = 0;
+
DEBUG(dbgs() << "If-converting " << *PI.PHI);
- unsigned PHIDst = PI.PHI->getOperand(0).getReg();
- unsigned DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst));
- TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
- DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
+ if (PI.TReg == PI.FReg) {
+ // We do not need the select instruction if both incoming values are
+ // equal.
+ DstReg = PI.TReg;
+ } else {
+ unsigned PHIDst = PI.PHI->getOperand(0).getReg();
+ DstReg = MRI->createVirtualRegister(MRI->getRegClass(PHIDst));
+ TII->insertSelect(*Head, FirstTerm, HeadDL,
+ DstReg, Cond, PI.TReg, PI.FReg);
+ DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
+ }
// Rewrite PHI operands TPred -> (DstReg, Head), remove FPred.
for (unsigned i = PI.PHI->getNumOperands(); i != 1; i -= 2) {
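The new early-out above amounts to treating a select with identical operands as the identity; a trivial, illustrative restatement (names are hypothetical, not from the pass):

  static unsigned pickPHIDest(unsigned TReg, unsigned FReg, unsigned FreshVReg) {
    // When both incoming values name the same vreg, no select is needed and
    // the input register is reused directly; FreshVReg models the vreg that
    // insertSelect() would otherwise have defined.
    return (TReg == FReg) ? TReg : FreshVReg;
  }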
diff --git a/lib/CodeGen/EdgeBundles.cpp b/lib/CodeGen/EdgeBundles.cpp
index aea7c31ba316..f43b2f1264d2 100644
--- a/lib/CodeGen/EdgeBundles.cpp
+++ b/lib/CodeGen/EdgeBundles.cpp
@@ -89,7 +89,7 @@ raw_ostream &WriteGraph<>(raw_ostream &O, const EdgeBundles &G,
O << "}\n";
return O;
}
-}
+} // namespace llvm
/// view - Visualize the annotated bipartite CFG with Graphviz.
void EdgeBundles::view() const {
diff --git a/lib/CodeGen/ExecutionDepsFix.cpp b/lib/CodeGen/ExecutionDepsFix.cpp
index 5b09cf1a0fd7..dd508b3e5e32 100644
--- a/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/lib/CodeGen/ExecutionDepsFix.cpp
@@ -110,7 +110,7 @@ struct DomainValue {
Instrs.clear();
}
};
-}
+} // namespace
namespace {
/// Information about a live register.
@@ -201,7 +201,7 @@ private:
bool shouldBreakDependence(MachineInstr*, unsigned OpIdx, unsigned Pref);
void processUndefReads(MachineBasicBlock*);
};
-}
+} // namespace
char ExeDepsFix::ID = 0;
diff --git a/lib/CodeGen/FaultMaps.cpp b/lib/CodeGen/FaultMaps.cpp
new file mode 100644
index 000000000000..0512ff95d1bf
--- /dev/null
+++ b/lib/CodeGen/FaultMaps.cpp
@@ -0,0 +1,114 @@
+//===---------------------------- FaultMaps.cpp ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/FaultMaps.h"
+
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "faultmaps"
+
+static const int FaultMapVersion = 1;
+const char *FaultMaps::WFMP = "Fault Maps: ";
+
+FaultMaps::FaultMaps(AsmPrinter &AP) : AP(AP) {}
+
+void FaultMaps::recordFaultingOp(FaultKind FaultTy,
+ const MCSymbol *HandlerLabel) {
+ MCContext &OutContext = AP.OutStreamer->getContext();
+ MCSymbol *FaultingLabel = OutContext.createTempSymbol();
+
+ AP.OutStreamer->EmitLabel(FaultingLabel);
+
+ const MCExpr *FaultingOffset = MCBinaryExpr::createSub(
+ MCSymbolRefExpr::create(FaultingLabel, OutContext),
+ MCSymbolRefExpr::create(AP.CurrentFnSymForSize, OutContext), OutContext);
+
+ const MCExpr *HandlerOffset = MCBinaryExpr::createSub(
+ MCSymbolRefExpr::create(HandlerLabel, OutContext),
+ MCSymbolRefExpr::create(AP.CurrentFnSymForSize, OutContext), OutContext);
+
+ FunctionInfos[AP.CurrentFnSym].emplace_back(FaultTy, FaultingOffset,
+ HandlerOffset);
+}
+
+void FaultMaps::serializeToFaultMapSection() {
+ if (FunctionInfos.empty())
+ return;
+
+ MCContext &OutContext = AP.OutStreamer->getContext();
+ MCStreamer &OS = *AP.OutStreamer;
+
+ // Create the section.
+ MCSection *FaultMapSection =
+ OutContext.getObjectFileInfo()->getFaultMapSection();
+ OS.SwitchSection(FaultMapSection);
+
+ // Emit a dummy symbol to force section inclusion.
+ OS.EmitLabel(OutContext.getOrCreateSymbol(Twine("__LLVM_FaultMaps")));
+
+ DEBUG(dbgs() << "********** Fault Map Output **********\n");
+
+ // Header
+ OS.EmitIntValue(FaultMapVersion, 1); // Version.
+ OS.EmitIntValue(0, 1); // Reserved.
+ OS.EmitIntValue(0, 2); // Reserved.
+
+ DEBUG(dbgs() << WFMP << "#functions = " << FunctionInfos.size() << "\n");
+ OS.EmitIntValue(FunctionInfos.size(), 4);
+
+ DEBUG(dbgs() << WFMP << "functions:\n");
+
+ for (const auto &FFI : FunctionInfos)
+ emitFunctionInfo(FFI.first, FFI.second);
+}
+
+void FaultMaps::emitFunctionInfo(const MCSymbol *FnLabel,
+ const FunctionFaultInfos &FFI) {
+ MCStreamer &OS = *AP.OutStreamer;
+
+ DEBUG(dbgs() << WFMP << " function addr: " << *FnLabel << "\n");
+ OS.EmitSymbolValue(FnLabel, 8);
+
+ DEBUG(dbgs() << WFMP << " #faulting PCs: " << FFI.size() << "\n");
+ OS.EmitIntValue(FFI.size(), 4);
+
+ OS.EmitIntValue(0, 4); // Reserved
+
+ for (auto &Fault : FFI) {
+ DEBUG(dbgs() << WFMP << " fault type: "
+ << faultTypeToString(Fault.Kind) << "\n");
+ OS.EmitIntValue(Fault.Kind, 4);
+
+ DEBUG(dbgs() << WFMP << " faulting PC offset: "
+ << *Fault.FaultingOffsetExpr << "\n");
+ OS.EmitValue(Fault.FaultingOffsetExpr, 4);
+
+ DEBUG(dbgs() << WFMP << " fault handler PC offset: "
+ << *Fault.HandlerOffsetExpr << "\n");
+ OS.EmitValue(Fault.HandlerOffsetExpr, 4);
+ }
+}
+
+
+const char *FaultMaps::faultTypeToString(FaultMaps::FaultKind FT) {
+ switch (FT) {
+ default:
+ llvm_unreachable("unhandled fault type!");
+
+ case FaultMaps::FaultingLoad:
+ return "FaultingLoad";
+ }
+}
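For reference, the byte layout emitted above can be restated as a reader-side view; these struct names are illustrative and do not come from the LLVM headers:

  #include <cstdint>

  struct FaultMapHeader {         // written by serializeToFaultMapSection()
    uint8_t  Version;             // FaultMapVersion == 1
    uint8_t  Reserved0;
    uint16_t Reserved1;
    uint32_t NumFunctions;        // FunctionInfos.size()
  };

  struct FaultMapFunctionHeader { // written by emitFunctionInfo()
    uint64_t FunctionAddress;     // 8-byte symbol value of FnLabel
    uint32_t NumFaultingPCs;      // FFI.size()
    uint32_t Reserved;
    // Followed by NumFaultingPCs records of the form:
    //   { uint32_t FaultKind; uint32_t FaultingPCOffset; uint32_t HandlerPCOffset; }
  };

Both structs are naturally aligned, so no implicit padding is introduced.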
diff --git a/lib/CodeGen/GCMetadata.cpp b/lib/CodeGen/GCMetadata.cpp
index c8116a453d2d..cba7f5fda5c3 100644
--- a/lib/CodeGen/GCMetadata.cpp
+++ b/lib/CodeGen/GCMetadata.cpp
@@ -38,7 +38,7 @@ public:
bool runOnFunction(Function &F) override;
bool doFinalization(Module &M) override;
};
-}
+} // namespace
INITIALIZE_PASS(GCModuleInfo, "collector-metadata",
"Create Garbage Collector Module Metadata", false, false)
diff --git a/lib/CodeGen/GCRootLowering.cpp b/lib/CodeGen/GCRootLowering.cpp
index d8edd7e4063f..fcef3226ed79 100644
--- a/lib/CodeGen/GCRootLowering.cpp
+++ b/lib/CodeGen/GCRootLowering.cpp
@@ -76,7 +76,7 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
};
-}
+} // namespace
// -----------------------------------------------------------------------------
diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp
index e861ceb2a664..963dfe74742e 100644
--- a/lib/CodeGen/IfConversion.cpp
+++ b/lib/CodeGen/IfConversion.cpp
@@ -264,7 +264,7 @@ namespace {
};
char IfConverter::ID = 0;
-}
+} // namespace
char &llvm::IfConverterID = IfConverter::ID;
diff --git a/lib/CodeGen/ImplicitNullChecks.cpp b/lib/CodeGen/ImplicitNullChecks.cpp
new file mode 100644
index 000000000000..b1176ce184cb
--- /dev/null
+++ b/lib/CodeGen/ImplicitNullChecks.cpp
@@ -0,0 +1,261 @@
+//===-- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass turns explicit null checks of the form
+//
+// test %r10, %r10
+// je throw_npe
+// movl (%r10), %esi
+// ...
+//
+// to
+//
+// faulting_load_op("movl (%r10), %esi", throw_npe)
+// ...
+//
+// With the help of a runtime that understands the .fault_maps section,
+// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
+// a page fault.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+static cl::opt<unsigned> PageSize("imp-null-check-page-size",
+ cl::desc("The page size of the target in "
+ "bytes"),
+ cl::init(4096));
+
+namespace {
+
+class ImplicitNullChecks : public MachineFunctionPass {
+ /// Represents one null check that can be made implicit.
+ struct NullCheck {
+ // The memory operation the null check can be folded into.
+ MachineInstr *MemOperation;
+
+ // The instruction actually doing the null check (Ptr != 0).
+ MachineInstr *CheckOperation;
+
+ // The block the check resides in.
+ MachineBasicBlock *CheckBlock;
+
+ // The block branched to if the pointer is non-null.
+ MachineBasicBlock *NotNullSucc;
+
+ // The block branched to if the pointer is null.
+ MachineBasicBlock *NullSucc;
+
+ NullCheck()
+ : MemOperation(), CheckOperation(), CheckBlock(), NotNullSucc(),
+ NullSucc() {}
+
+ explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
+ MachineBasicBlock *checkBlock,
+ MachineBasicBlock *notNullSucc,
+ MachineBasicBlock *nullSucc)
+ : MemOperation(memOperation), CheckOperation(checkOperation),
+ CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc) {
+ }
+ };
+
+ const TargetInstrInfo *TII = nullptr;
+ const TargetRegisterInfo *TRI = nullptr;
+ MachineModuleInfo *MMI = nullptr;
+
+ bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
+ SmallVectorImpl<NullCheck> &NullCheckList);
+ MachineInstr *insertFaultingLoad(MachineInstr *LoadMI, MachineBasicBlock *MBB,
+ MCSymbol *HandlerLabel);
+ void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);
+
+public:
+ static char ID;
+
+ ImplicitNullChecks() : MachineFunctionPass(ID) {
+ initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // namespace
+
+bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
+ TII = MF.getSubtarget().getInstrInfo();
+ TRI = MF.getRegInfo().getTargetRegisterInfo();
+ MMI = &MF.getMMI();
+
+ SmallVector<NullCheck, 16> NullCheckList;
+
+ for (auto &MBB : MF)
+ analyzeBlockForNullChecks(MBB, NullCheckList);
+
+ if (!NullCheckList.empty())
+ rewriteNullChecks(NullCheckList);
+
+ return !NullCheckList.empty();
+}
+
+/// Analyze MBB to check if its terminating branch can be turned into an
+/// implicit null check. If yes, append a description of that null check to
+/// NullCheckList and return true; otherwise return false.
+bool ImplicitNullChecks::analyzeBlockForNullChecks(
+ MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
+ typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;
+
+ MachineBranchPredicate MBP;
+
+ if (TII->AnalyzeBranchPredicate(MBB, MBP, true))
+ return false;
+
+ // Is the predicate comparing an integer to zero?
+ if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
+ (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
+ MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
+ return false;
+
+ // If we cannot erase the test instruction itself, then making the null check
+ // implicit does not buy us much.
+ if (!MBP.SingleUseCondition)
+ return false;
+
+ MachineBasicBlock *NotNullSucc, *NullSucc;
+
+ if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
+ NotNullSucc = MBP.TrueDest;
+ NullSucc = MBP.FalseDest;
+ } else {
+ NotNullSucc = MBP.FalseDest;
+ NullSucc = MBP.TrueDest;
+ }
+
+ // We handle the simplest case for now. We can potentially do better by using
+ // the machine dominator tree.
+ if (NotNullSucc->pred_size() != 1)
+ return false;
+
+ // Starting with a code fragment like:
+ //
+ // test %RAX, %RAX
+ // jne LblNotNull
+ //
+ // LblNull:
+ // callq throw_NullPointerException
+ //
+ // LblNotNull:
+ // Def = Load (%RAX + <offset>)
+ // ...
+ //
+ //
+ // we want to end up with
+ //
+ // Def = TrappingLoad (%RAX + <offset>), LblNull
+ // jmp LblNotNull ;; explicit or fallthrough
+ //
+ // LblNotNull:
+ // ...
+ //
+ // LblNull:
+ // callq throw_NullPointerException
+ //
+
+ unsigned PointerReg = MBP.LHS.getReg();
+ MachineInstr *MemOp = &*NotNullSucc->begin();
+ unsigned BaseReg, Offset;
+ if (TII->getMemOpBaseRegImmOfs(MemOp, BaseReg, Offset, TRI))
+ if (MemOp->mayLoad() && !MemOp->isPredicable() && BaseReg == PointerReg &&
+ Offset < PageSize && MemOp->getDesc().getNumDefs() == 1) {
+ NullCheckList.emplace_back(MemOp, MBP.ConditionDef, &MBB, NotNullSucc,
+ NullSucc);
+ return true;
+ }
+
+ return false;
+}
+
+/// Wrap a machine load instruction, LoadMI, into a FAULTING_LOAD_OP machine
+/// instruction. The FAULTING_LOAD_OP instruction does the same load as LoadMI
+/// (defining the same register), and branches to HandlerLabel if the load
+/// faults. The FAULTING_LOAD_OP instruction is inserted at the end of MBB.
+MachineInstr *ImplicitNullChecks::insertFaultingLoad(MachineInstr *LoadMI,
+ MachineBasicBlock *MBB,
+ MCSymbol *HandlerLabel) {
+ DebugLoc DL;
+ unsigned NumDefs = LoadMI->getDesc().getNumDefs();
+ assert(NumDefs == 1 && "other cases unhandled!");
+ (void)NumDefs;
+
+ unsigned DefReg = LoadMI->defs().begin()->getReg();
+ assert(std::distance(LoadMI->defs().begin(), LoadMI->defs().end()) == 1 &&
+ "expected exactly one def!");
+
+ auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_LOAD_OP), DefReg)
+ .addSym(HandlerLabel)
+ .addImm(LoadMI->getOpcode());
+
+ for (auto &MO : LoadMI->uses())
+ MIB.addOperand(MO);
+
+ MIB.setMemRefs(LoadMI->memoperands_begin(), LoadMI->memoperands_end());
+
+ return MIB;
+}
+
+/// Rewrite the null checks in NullCheckList into implicit null checks.
+void ImplicitNullChecks::rewriteNullChecks(
+ ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
+ DebugLoc DL;
+
+ for (auto &NC : NullCheckList) {
+ MCSymbol *HandlerLabel = MMI->getContext().createTempSymbol();
+
+ // Remove the conditional branch dependent on the null check.
+ unsigned BranchesRemoved = TII->RemoveBranch(*NC.CheckBlock);
+ (void)BranchesRemoved;
+ assert(BranchesRemoved > 0 && "expected at least one branch!");
+
+ // Insert a faulting load where the conditional branch was originally. The
+ // check we did earlier ensures that this bit of code motion is legal. We do
+ // not touch the successors list for any basic block since we haven't changed
+ // control flow; we've just made it implicit.
+ insertFaultingLoad(NC.MemOperation, NC.CheckBlock, HandlerLabel);
+ NC.MemOperation->removeFromParent();
+ NC.CheckOperation->eraseFromParent();
+
+ // Insert an *unconditional* branch to not-null successor.
+ TII->InsertBranch(*NC.CheckBlock, NC.NotNullSucc, nullptr, /*Cond=*/None,
+ DL);
+
+ // Emit the HandlerLabel as an EH_LABEL.
+ BuildMI(*NC.NullSucc, NC.NullSucc->begin(), DL,
+ TII->get(TargetOpcode::EH_LABEL)).addSym(HandlerLabel);
+ }
+}
+
+char ImplicitNullChecks::ID = 0;
+char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;
+INITIALIZE_PASS_BEGIN(ImplicitNullChecks, "implicit-null-checks",
+ "Implicit null checks", false, false)
+INITIALIZE_PASS_END(ImplicitNullChecks, "implicit-null-checks",
+ "Implicit null checks", false, false)
diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index 9989f233d09c..48c95c9b691f 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -181,7 +181,7 @@ private:
void spillAroundUses(unsigned Reg);
void spillAll();
};
-}
+} // namespace
namespace llvm {
@@ -194,7 +194,7 @@ Spiller *createInlineSpiller(MachineFunctionPass &pass,
return new InlineSpiller(pass, mf, vrm);
}
-}
+} // namespace llvm
//===----------------------------------------------------------------------===//
// Snippets
diff --git a/lib/CodeGen/LLVMBuild.txt b/lib/CodeGen/LLVMBuild.txt
index 05905d04dabf..18ed77607c6a 100644
--- a/lib/CodeGen/LLVMBuild.txt
+++ b/lib/CodeGen/LLVMBuild.txt
@@ -22,4 +22,4 @@ subdirectories = AsmPrinter SelectionDAG MIRParser
type = Library
name = CodeGen
parent = Libraries
-required_libraries = Analysis Core MC Scalar Support Target TransformUtils
+required_libraries = Analysis Core Instrumentation MC Scalar Support Target TransformUtils
diff --git a/lib/CodeGen/LLVMTargetMachine.cpp b/lib/CodeGen/LLVMTargetMachine.cpp
index ff5205801bc4..b486bdc91453 100644
--- a/lib/CodeGen/LLVMTargetMachine.cpp
+++ b/lib/CodeGen/LLVMTargetMachine.cpp
@@ -43,16 +43,17 @@ EnableFastISelOption("fast-isel", cl::Hidden,
cl::desc("Enable the \"fast\" instruction selector"));
void LLVMTargetMachine::initAsmInfo() {
- MRI = TheTarget.createMCRegInfo(getTargetTriple());
+ MRI = TheTarget.createMCRegInfo(getTargetTriple().str());
MII = TheTarget.createMCInstrInfo();
// FIXME: Having an MCSubtargetInfo on the target machine is a hack due
// to some backends having subtarget feature dependent module level
// code generation. This is similar to the hack in the AsmPrinter for
// module level assembly etc.
- STI = TheTarget.createMCSubtargetInfo(getTargetTriple(), getTargetCPU(),
+ STI = TheTarget.createMCSubtargetInfo(getTargetTriple().str(), getTargetCPU(),
getTargetFeatureString());
- MCAsmInfo *TmpAsmInfo = TheTarget.createMCAsmInfo(*MRI, getTargetTriple());
+ MCAsmInfo *TmpAsmInfo =
+ TheTarget.createMCAsmInfo(*MRI, getTargetTriple().str());
// TargetSelect.h moved to a different directory between LLVM 2.9 and 3.0,
// and if the old one gets included then MCAsmInfo will be NULL and
// we'll crash later.
@@ -72,12 +73,12 @@ void LLVMTargetMachine::initAsmInfo() {
LLVMTargetMachine::LLVMTargetMachine(const Target &T,
StringRef DataLayoutString,
- StringRef Triple, StringRef CPU,
+ const Triple &TT, StringRef CPU,
StringRef FS, TargetOptions Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : TargetMachine(T, DataLayoutString, Triple, CPU, FS, Options) {
- CodeGenInfo = T.createMCCodeGenInfo(Triple, RM, CM, OL);
+ : TargetMachine(T, DataLayoutString, TT, CPU, FS, Options) {
+ CodeGenInfo = T.createMCCodeGenInfo(TT.str(), RM, CM, OL);
}
TargetIRAnalysis LLVMTargetMachine::getTargetIRAnalysis() {
@@ -87,11 +88,11 @@ TargetIRAnalysis LLVMTargetMachine::getTargetIRAnalysis() {
}
/// addPassesToX helper drives creation and initialization of TargetPassConfig.
-static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
- PassManagerBase &PM,
- bool DisableVerify,
- AnalysisID StartAfter,
- AnalysisID StopAfter) {
+static MCContext *
+addPassesToGenerateCode(LLVMTargetMachine *TM, PassManagerBase &PM,
+ bool DisableVerify, AnalysisID StartAfter,
+ AnalysisID StopAfter,
+ MachineFunctionInitializer *MFInitializer = nullptr) {
// Add internal analysis passes from the target machine.
PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
@@ -121,7 +122,7 @@ static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
PM.add(MMI);
// Set up a MachineFunction for the rest of CodeGen to work on.
- PM.add(new MachineFunctionAnalysis(*TM));
+ PM.add(new MachineFunctionAnalysis(*TM, MFInitializer));
// Enable FastISel with -fast, but allow that to be overridden.
if (EnableFastISelOption == cl::BOU_TRUE ||
@@ -142,10 +143,11 @@ static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
bool LLVMTargetMachine::addPassesToEmitFile(
PassManagerBase &PM, raw_pwrite_stream &Out, CodeGenFileType FileType,
- bool DisableVerify, AnalysisID StartAfter, AnalysisID StopAfter) {
+ bool DisableVerify, AnalysisID StartAfter, AnalysisID StopAfter,
+ MachineFunctionInitializer *MFInitializer) {
// Add common CodeGen passes.
- MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify,
- StartAfter, StopAfter);
+ MCContext *Context = addPassesToGenerateCode(
+ this, PM, DisableVerify, StartAfter, StopAfter, MFInitializer);
if (!Context)
return true;
@@ -167,15 +169,15 @@ bool LLVMTargetMachine::addPassesToEmitFile(
switch (FileType) {
case CGFT_AssemblyFile: {
MCInstPrinter *InstPrinter = getTarget().createMCInstPrinter(
- Triple(getTargetTriple()), MAI.getAssemblerDialect(), MAI, MII, MRI);
+ getTargetTriple(), MAI.getAssemblerDialect(), MAI, MII, MRI);
// Create a code emitter if asked to show the encoding.
MCCodeEmitter *MCE = nullptr;
if (Options.MCOptions.ShowMCEncoding)
MCE = getTarget().createMCCodeEmitter(MII, MRI, *Context);
- MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
- TargetCPU);
+ MCAsmBackend *MAB =
+ getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU);
auto FOut = llvm::make_unique<formatted_raw_ostream>(Out);
MCStreamer *S = getTarget().createAsmStreamer(
*Context, std::move(FOut), Options.MCOptions.AsmVerbose,
@@ -188,15 +190,15 @@ bool LLVMTargetMachine::addPassesToEmitFile(
// Create the code emitter for the target if it exists. If not, .o file
// emission fails.
MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(MII, MRI, *Context);
- MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
- TargetCPU);
+ MCAsmBackend *MAB =
+ getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU);
if (!MCE || !MAB)
return true;
// Don't waste memory on names of temp labels.
Context->setUseNamesOnTempLabels(false);
- Triple T(getTargetTriple());
+ Triple T(getTargetTriple().str());
AsmStreamer.reset(getTarget().createMCObjectStreamer(
T, *Context, *MAB, Out, MCE, STI, Options.MCOptions.MCRelaxAll,
/*DWARFMustBeAtTheEnd*/ true));
@@ -241,12 +243,12 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM, MCContext *&Ctx,
const MCRegisterInfo &MRI = *getMCRegisterInfo();
MCCodeEmitter *MCE =
getTarget().createMCCodeEmitter(*getMCInstrInfo(), MRI, *Ctx);
- MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
- TargetCPU);
+ MCAsmBackend *MAB =
+ getTarget().createMCAsmBackend(MRI, getTargetTriple().str(), TargetCPU);
if (!MCE || !MAB)
return true;
- Triple T(getTargetTriple());
+ const Triple &T = getTargetTriple();
const MCSubtargetInfo &STI = *getMCSubtargetInfo();
std::unique_ptr<MCStreamer> AsmStreamer(getTarget().createMCObjectStreamer(
T, *Ctx, *MAB, Out, MCE, STI, Options.MCOptions.MCRelaxAll,
diff --git a/lib/CodeGen/LiveVariables.cpp b/lib/CodeGen/LiveVariables.cpp
index 11deb813dde8..b355393e76f7 100644
--- a/lib/CodeGen/LiveVariables.cpp
+++ b/lib/CodeGen/LiveVariables.cpp
@@ -738,45 +738,22 @@ bool LiveVariables::VarInfo::isLiveIn(const MachineBasicBlock &MBB,
bool LiveVariables::isLiveOut(unsigned Reg, const MachineBasicBlock &MBB) {
LiveVariables::VarInfo &VI = getVarInfo(Reg);
+ SmallPtrSet<const MachineBasicBlock *, 8> Kills;
+ for (unsigned i = 0, e = VI.Kills.size(); i != e; ++i)
+ Kills.insert(VI.Kills[i]->getParent());
+
// Loop over all of the successors of the basic block, checking to see if
// the value is either live in the block, or if it is killed in the block.
- SmallVector<MachineBasicBlock*, 8> OpSuccBlocks;
- for (MachineBasicBlock::const_succ_iterator SI = MBB.succ_begin(),
- E = MBB.succ_end(); SI != E; ++SI) {
- MachineBasicBlock *SuccMBB = *SI;
-
+ for (const MachineBasicBlock *SuccMBB : MBB.successors()) {
// Is it alive in this successor?
unsigned SuccIdx = SuccMBB->getNumber();
if (VI.AliveBlocks.test(SuccIdx))
return true;
- OpSuccBlocks.push_back(SuccMBB);
+ // Or is it live because there is a use in a successor that kills it?
+ if (Kills.count(SuccMBB))
+ return true;
}
- // Check to see if this value is live because there is a use in a successor
- // that kills it.
- switch (OpSuccBlocks.size()) {
- case 1: {
- MachineBasicBlock *SuccMBB = OpSuccBlocks[0];
- for (unsigned i = 0, e = VI.Kills.size(); i != e; ++i)
- if (VI.Kills[i]->getParent() == SuccMBB)
- return true;
- break;
- }
- case 2: {
- MachineBasicBlock *SuccMBB1 = OpSuccBlocks[0], *SuccMBB2 = OpSuccBlocks[1];
- for (unsigned i = 0, e = VI.Kills.size(); i != e; ++i)
- if (VI.Kills[i]->getParent() == SuccMBB1 ||
- VI.Kills[i]->getParent() == SuccMBB2)
- return true;
- break;
- }
- default:
- std::sort(OpSuccBlocks.begin(), OpSuccBlocks.end());
- for (unsigned i = 0, e = VI.Kills.size(); i != e; ++i)
- if (std::binary_search(OpSuccBlocks.begin(), OpSuccBlocks.end(),
- VI.Kills[i]->getParent()))
- return true;
- }
return false;
}
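The rewritten query above replaces the old sort-and-binary-search cases with one hash set; restated in a standalone, illustrative form (block numbers stand in for MachineBasicBlocks):

  #include <algorithm>
  #include <unordered_set>
  #include <vector>

  // A value is live out of a block if some successor either has it live-in
  // or contains a use that kills it; both tests are O(1) set lookups.
  static bool isLiveOutSketch(const std::vector<int> &SuccIds,
                              const std::unordered_set<int> &AliveBlocks,
                              const std::unordered_set<int> &KillBlocks) {
    return std::any_of(SuccIds.begin(), SuccIds.end(), [&](int S) {
      return AliveBlocks.count(S) != 0 || KillBlocks.count(S) != 0;
    });
  }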
diff --git a/lib/CodeGen/MIRParser/MIRParser.cpp b/lib/CodeGen/MIRParser/MIRParser.cpp
index 7a51b3881afc..1fef3f6dcb34 100644
--- a/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -14,10 +14,17 @@
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/AsmParser/Parser.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
@@ -27,7 +34,7 @@
using namespace llvm;
-namespace {
+namespace llvm {
/// This class implements the parsing of LLVM IR that's embedded inside a MIR
/// file.
@@ -35,29 +42,56 @@ class MIRParserImpl {
SourceMgr SM;
StringRef Filename;
LLVMContext &Context;
+ StringMap<std::unique_ptr<yaml::MachineFunction>> Functions;
public:
MIRParserImpl(std::unique_ptr<MemoryBuffer> Contents, StringRef Filename,
LLVMContext &Context);
+ void reportDiagnostic(const SMDiagnostic &Diag);
+
+ /// Report an error with the given message at an unknown location.
+ ///
+ /// Always returns true.
+ bool error(const Twine &Message);
+
/// Try to parse the optional LLVM module and the machine functions in the MIR
/// file.
///
/// Return null if an error occurred.
- std::unique_ptr<Module> parse(SMDiagnostic &Error);
+ std::unique_ptr<Module> parse();
/// Parse the machine function in the current YAML document.
///
+ /// \param NoLLVMIR - set to true when the MIR file doesn't have LLVM IR.
+ /// A dummy IR function is created and inserted into the given module when
+ /// this parameter is true.
+ ///
+ /// Return true if an error occurred.
+ bool parseMachineFunction(yaml::Input &In, Module &M, bool NoLLVMIR);
+
+ /// Initialize the machine function to the state that's described in the MIR
+ /// file.
+ ///
+ /// Return true if an error occurred.
+ bool initializeMachineFunction(MachineFunction &MF);
+
+ /// Initialize the machine basic block using its YAML representation.
+ ///
/// Return true if an error occurred.
- bool parseMachineFunction(yaml::Input &In);
+ bool initializeMachineBasicBlock(MachineBasicBlock &MBB,
+ const yaml::MachineBasicBlock &YamlMBB);
private:
/// Return a MIR diagnostic converted from an LLVM assembly diagnostic.
SMDiagnostic diagFromLLVMAssemblyDiag(const SMDiagnostic &Error,
SMRange SourceRange);
+
+ /// Create an empty function with the given name.
+ void createDummyFunction(StringRef Name, Module &M);
};
-} // end anonymous namespace
+} // end namespace llvm
MIRParserImpl::MIRParserImpl(std::unique_ptr<MemoryBuffer> Contents,
StringRef Filename, LLVMContext &Context)
@@ -65,30 +99,54 @@ MIRParserImpl::MIRParserImpl(std::unique_ptr<MemoryBuffer> Contents,
SM.AddNewSourceBuffer(std::move(Contents), SMLoc());
}
+bool MIRParserImpl::error(const Twine &Message) {
+ Context.diagnose(DiagnosticInfoMIRParser(
+ DS_Error, SMDiagnostic(Filename, SourceMgr::DK_Error, Message.str())));
+ return true;
+}
+
+void MIRParserImpl::reportDiagnostic(const SMDiagnostic &Diag) {
+ DiagnosticSeverity Kind;
+ switch (Diag.getKind()) {
+ case SourceMgr::DK_Error:
+ Kind = DS_Error;
+ break;
+ case SourceMgr::DK_Warning:
+ Kind = DS_Warning;
+ break;
+ case SourceMgr::DK_Note:
+ Kind = DS_Note;
+ break;
+ }
+ Context.diagnose(DiagnosticInfoMIRParser(Kind, Diag));
+}
+
static void handleYAMLDiag(const SMDiagnostic &Diag, void *Context) {
- *reinterpret_cast<SMDiagnostic *>(Context) = Diag;
+ reinterpret_cast<MIRParserImpl *>(Context)->reportDiagnostic(Diag);
}
-std::unique_ptr<Module> MIRParserImpl::parse(SMDiagnostic &Error) {
+std::unique_ptr<Module> MIRParserImpl::parse() {
yaml::Input In(SM.getMemoryBuffer(SM.getMainFileID())->getBuffer(),
- /*Ctxt=*/nullptr, handleYAMLDiag, &Error);
+ /*Ctxt=*/nullptr, handleYAMLDiag, this);
if (!In.setCurrentDocument()) {
- if (!Error.getMessage().empty())
+ if (In.error())
return nullptr;
// Create an empty module when the MIR file is empty.
return llvm::make_unique<Module>(Filename, Context);
}
std::unique_ptr<Module> M;
+ bool NoLLVMIR = false;
// Parse the block scalar manually so that we can return a unique pointer
// without having to go through YAML traits.
if (const auto *BSN =
dyn_cast_or_null<yaml::BlockScalarNode>(In.getCurrentNode())) {
+ SMDiagnostic Error;
M = parseAssembly(MemoryBufferRef(BSN->getValue(), Filename), Error,
Context);
if (!M) {
- Error = diagFromLLVMAssemblyDiag(Error, BSN->getSourceRange());
+ reportDiagnostic(diagFromLLVMAssemblyDiag(Error, BSN->getSourceRange()));
return M;
}
In.nextDocument();
@@ -97,11 +155,12 @@ std::unique_ptr<Module> MIRParserImpl::parse(SMDiagnostic &Error) {
} else {
// Create a new, empty module.
M = llvm::make_unique<Module>(Filename, Context);
+ NoLLVMIR = true;
}
// Parse the machine functions.
do {
- if (parseMachineFunction(In))
+ if (parseMachineFunction(In, *M, NoLLVMIR))
return nullptr;
In.nextDocument();
} while (In.setCurrentDocument());
@@ -109,13 +168,68 @@ std::unique_ptr<Module> MIRParserImpl::parse(SMDiagnostic &Error) {
return M;
}
-bool MIRParserImpl::parseMachineFunction(yaml::Input &In) {
- yaml::MachineFunction MF;
- yaml::yamlize(In, MF, false);
+bool MIRParserImpl::parseMachineFunction(yaml::Input &In, Module &M,
+ bool NoLLVMIR) {
+ auto MF = llvm::make_unique<yaml::MachineFunction>();
+ yaml::yamlize(In, *MF, false);
if (In.error())
return true;
- // TODO: Initialize the real machine function with the state in the yaml
- // machine function later on.
+ auto FunctionName = MF->Name;
+ if (Functions.find(FunctionName) != Functions.end())
+ return error(Twine("redefinition of machine function '") + FunctionName +
+ "'");
+ Functions.insert(std::make_pair(FunctionName, std::move(MF)));
+ if (NoLLVMIR)
+ createDummyFunction(FunctionName, M);
+ else if (!M.getFunction(FunctionName))
+ return error(Twine("function '") + FunctionName +
+ "' isn't defined in the provided LLVM IR");
+ return false;
+}
+
+void MIRParserImpl::createDummyFunction(StringRef Name, Module &M) {
+ auto &Context = M.getContext();
+ Function *F = cast<Function>(M.getOrInsertFunction(
+ Name, FunctionType::get(Type::getVoidTy(Context), false)));
+ BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
+ new UnreachableInst(Context, BB);
+}
+
+bool MIRParserImpl::initializeMachineFunction(MachineFunction &MF) {
+ auto It = Functions.find(MF.getName());
+ if (It == Functions.end())
+ return error(Twine("no machine function information for function '") +
+ MF.getName() + "' in the MIR file");
+ // TODO: Recreate the machine function.
+ const yaml::MachineFunction &YamlMF = *It->getValue();
+ if (YamlMF.Alignment)
+ MF.setAlignment(YamlMF.Alignment);
+ MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
+ MF.setHasInlineAsm(YamlMF.HasInlineAsm);
+ const auto &F = *MF.getFunction();
+ for (const auto &YamlMBB : YamlMF.BasicBlocks) {
+ const BasicBlock *BB = nullptr;
+ if (!YamlMBB.Name.empty()) {
+ BB = dyn_cast_or_null<BasicBlock>(
+ F.getValueSymbolTable().lookup(YamlMBB.Name));
+ if (!BB)
+ return error(Twine("basic block '") + YamlMBB.Name +
+ "' is not defined in the function '" + MF.getName() + "'");
+ }
+ auto *MBB = MF.CreateMachineBasicBlock(BB);
+ MF.insert(MF.end(), MBB);
+ if (initializeMachineBasicBlock(*MBB, YamlMBB))
+ return true;
+ }
+ return false;
+}
+
+bool MIRParserImpl::initializeMachineBasicBlock(
+ MachineBasicBlock &MBB, const yaml::MachineBasicBlock &YamlMBB) {
+ MBB.setAlignment(YamlMBB.Alignment);
+ if (YamlMBB.AddressTaken)
+ MBB.setHasAddressTaken();
+ MBB.setIsLandingPad(YamlMBB.IsLandingPad);
return false;
}
@@ -150,22 +264,33 @@ SMDiagnostic MIRParserImpl::diagFromLLVMAssemblyDiag(const SMDiagnostic &Error,
Error.getFixIts());
}
-std::unique_ptr<Module> llvm::parseMIRFile(StringRef Filename,
- SMDiagnostic &Error,
- LLVMContext &Context) {
+MIRParser::MIRParser(std::unique_ptr<MIRParserImpl> Impl)
+ : Impl(std::move(Impl)) {}
+
+MIRParser::~MIRParser() {}
+
+std::unique_ptr<Module> MIRParser::parseLLVMModule() { return Impl->parse(); }
+
+bool MIRParser::initializeMachineFunction(MachineFunction &MF) {
+ return Impl->initializeMachineFunction(MF);
+}
+
+std::unique_ptr<MIRParser> llvm::createMIRParserFromFile(StringRef Filename,
+ SMDiagnostic &Error,
+ LLVMContext &Context) {
auto FileOrErr = MemoryBuffer::getFile(Filename);
if (std::error_code EC = FileOrErr.getError()) {
Error = SMDiagnostic(Filename, SourceMgr::DK_Error,
"Could not open input file: " + EC.message());
- return std::unique_ptr<Module>();
+ return nullptr;
}
- return parseMIR(std::move(FileOrErr.get()), Error, Context);
+ return createMIRParser(std::move(FileOrErr.get()), Context);
}
-std::unique_ptr<Module> llvm::parseMIR(std::unique_ptr<MemoryBuffer> Contents,
- SMDiagnostic &Error,
- LLVMContext &Context) {
+std::unique_ptr<MIRParser>
+llvm::createMIRParser(std::unique_ptr<MemoryBuffer> Contents,
+ LLVMContext &Context) {
auto Filename = Contents->getBufferIdentifier();
- MIRParserImpl Parser(std::move(Contents), Filename, Context);
- return Parser.parse(Error);
+ return llvm::make_unique<MIRParser>(
+ llvm::make_unique<MIRParserImpl>(std::move(Contents), Filename, Context));
}
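A minimal sketch of driving the reworked API, assuming only the declarations introduced in this diff (createMIRParserFromFile, parseLLVMModule, initializeMachineFunction):

  #include "llvm/ADT/StringRef.h"
  #include "llvm/CodeGen/MIRParser/MIRParser.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/SourceMgr.h"
  #include <memory>

  static std::unique_ptr<llvm::Module> loadMIR(llvm::StringRef Path,
                                               llvm::LLVMContext &Ctx) {
    llvm::SMDiagnostic Err;
    auto Parser = llvm::createMIRParserFromFile(Path, Err, Ctx);
    if (!Parser)
      return nullptr; // the file could not be opened; Err holds the reason
    // Parses the embedded LLVM IR (or creates dummy functions when the file
    // has none) and records the YAML machine functions so that a later
    // Parser->initializeMachineFunction(MF) call can consume them.
    return Parser->parseLLVMModule();
  }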
diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp
new file mode 100644
index 000000000000..bbf163a759ef
--- /dev/null
+++ b/lib/CodeGen/MIRPrinter.cpp
@@ -0,0 +1,96 @@
+//===- MIRPrinter.cpp - MIR serialization format printer ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the class that prints out the LLVM IR and machine
+// functions using the MIR serialization format.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MIRPrinter.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MIRYamlMapping.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/YAMLTraits.h"
+
+using namespace llvm;
+
+namespace {
+
+/// This class prints out the machine functions using the MIR serialization
+/// format.
+class MIRPrinter {
+ raw_ostream &OS;
+
+public:
+ MIRPrinter(raw_ostream &OS) : OS(OS) {}
+
+ void print(const MachineFunction &MF);
+
+ void convert(yaml::MachineBasicBlock &YamlMBB, const MachineBasicBlock &MBB);
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace yaml {
+
+/// This struct serializes the LLVM IR module.
+template <> struct BlockScalarTraits<Module> {
+ static void output(const Module &Mod, void *Ctxt, raw_ostream &OS) {
+ Mod.print(OS, nullptr);
+ }
+ static StringRef input(StringRef Str, void *Ctxt, Module &Mod) {
+ llvm_unreachable("LLVM Module is supposed to be parsed separately");
+ return "";
+ }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+void MIRPrinter::print(const MachineFunction &MF) {
+ yaml::MachineFunction YamlMF;
+ YamlMF.Name = MF.getName();
+ YamlMF.Alignment = MF.getAlignment();
+ YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice();
+ YamlMF.HasInlineAsm = MF.hasInlineAsm();
+ for (const auto &MBB : MF) {
+ yaml::MachineBasicBlock YamlMBB;
+ convert(YamlMBB, MBB);
+ YamlMF.BasicBlocks.push_back(YamlMBB);
+ }
+ yaml::Output Out(OS);
+ Out << YamlMF;
+}
+
+void MIRPrinter::convert(yaml::MachineBasicBlock &YamlMBB,
+ const MachineBasicBlock &MBB) {
+ // TODO: Serialize unnamed BB references.
+ if (const auto *BB = MBB.getBasicBlock())
+ YamlMBB.Name = BB->hasName() ? BB->getName() : "<unnamed bb>";
+ else
+ YamlMBB.Name = "";
+ YamlMBB.Alignment = MBB.getAlignment();
+ YamlMBB.AddressTaken = MBB.hasAddressTaken();
+ YamlMBB.IsLandingPad = MBB.isLandingPad();
+}
+
+void llvm::printMIR(raw_ostream &OS, const Module &M) {
+ yaml::Output Out(OS);
+ Out << const_cast<Module &>(M);
+}
+
+void llvm::printMIR(raw_ostream &OS, const MachineFunction &MF) {
+ MIRPrinter Printer(OS);
+ Printer.print(MF);
+}
diff --git a/lib/CodeGen/MIRPrinter.h b/lib/CodeGen/MIRPrinter.h
new file mode 100644
index 000000000000..16aa9038b6b2
--- /dev/null
+++ b/lib/CodeGen/MIRPrinter.h
@@ -0,0 +1,33 @@
+//===- MIRPrinter.h - MIR serialization format printer --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the functions that print out the LLVM IR and the machine
+// functions using the MIR serialization format.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_CODEGEN_MIRPRINTER_H
+#define LLVM_LIB_CODEGEN_MIRPRINTER_H
+
+namespace llvm {
+
+class MachineFunction;
+class Module;
+class raw_ostream;
+
+/// Print LLVM IR using the MIR serialization format to the given output stream.
+void printMIR(raw_ostream &OS, const Module &M);
+
+/// Print a machine function using the MIR serialization format to the given
+/// output stream.
+void printMIR(raw_ostream &OS, const MachineFunction &MF);
+
+} // end namespace llvm
+
+#endif
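A one-line usage sketch, assuming the declarations above (note that MIRPrinter.h is an in-tree header, not an installed one):

  #include "MIRPrinter.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/raw_ostream.h"

  static void dumpModuleAsMIR(const llvm::Module &M) {
    llvm::printMIR(llvm::outs(), M); // the IR is emitted as a YAML block scalar
  }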
diff --git a/lib/CodeGen/MIRPrintingPass.cpp b/lib/CodeGen/MIRPrintingPass.cpp
index 5e0f4cdcbfde..13d61e65d7e0 100644
--- a/lib/CodeGen/MIRPrintingPass.cpp
+++ b/lib/CodeGen/MIRPrintingPass.cpp
@@ -12,54 +12,17 @@
//
//===----------------------------------------------------------------------===//
+#include "MIRPrinter.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
-#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/YAMLTraits.h"
using namespace llvm;
-namespace llvm {
-namespace yaml {
-
-/// This struct serializes the LLVM IR module.
-template <> struct BlockScalarTraits<Module> {
- static void output(const Module &Mod, void *Ctxt, raw_ostream &OS) {
- Mod.print(OS, nullptr);
- }
- static StringRef input(StringRef Str, void *Ctxt, Module &Mod) {
- llvm_unreachable("LLVM Module is supposed to be parsed separately");
- return "";
- }
-};
-
-} // end namespace yaml
-} // end namespace llvm
-
namespace {
-/// This class prints out the machine functions using the MIR serialization
-/// format.
-class MIRPrinter {
- raw_ostream &OS;
-
-public:
- MIRPrinter(raw_ostream &OS) : OS(OS) {}
-
- void print(const MachineFunction &MF);
-};
-
-void MIRPrinter::print(const MachineFunction &MF) {
- yaml::MachineFunction YamlMF;
- YamlMF.Name = MF.getName();
- yaml::Output Out(OS);
- Out << YamlMF;
-}
-
/// This pass prints out the LLVM IR to an output stream using the MIR
/// serialization format.
struct MIRPrintingPass : public MachineFunctionPass {
@@ -80,14 +43,13 @@ struct MIRPrintingPass : public MachineFunctionPass {
virtual bool runOnMachineFunction(MachineFunction &MF) override {
std::string Str;
raw_string_ostream StrOS(Str);
- MIRPrinter(StrOS).print(MF);
+ printMIR(StrOS, MF);
MachineFunctions.append(StrOS.str());
return false;
}
virtual bool doFinalization(Module &M) override {
- yaml::Output Out(OS);
- Out << M;
+ printMIR(OS, M);
OS << MachineFunctions;
return false;
}
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index 2969bad4ff98..141990bbe87d 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -179,7 +179,7 @@ public:
/// in-loop predecessors of this chain.
unsigned LoopPredecessors;
};
-}
+} // namespace
namespace {
class MachineBlockPlacement : public MachineFunctionPass {
@@ -267,7 +267,7 @@ public:
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
char MachineBlockPlacement::ID = 0;
char &llvm::MachineBlockPlacementID = MachineBlockPlacement::ID;
@@ -1185,7 +1185,7 @@ public:
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
char MachineBlockPlacementStats::ID = 0;
char &llvm::MachineBlockPlacementStatsID = MachineBlockPlacementStats::ID;
diff --git a/lib/CodeGen/MachineCombiner.cpp b/lib/CodeGen/MachineCombiner.cpp
index a4bc77edb753..5019e8eef19b 100644
--- a/lib/CodeGen/MachineCombiner.cpp
+++ b/lib/CodeGen/MachineCombiner.cpp
@@ -78,7 +78,7 @@ private:
void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
};
-}
+} // namespace
char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;
@@ -223,14 +223,14 @@ bool MachineCombiner::preservesCriticalPathLen(
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {
assert(TSchedModel.hasInstrSchedModel() && "Missing machine model\n");
- // NewRoot is the last instruction in the \p InsInstrs vector
- // Get depth and latency of NewRoot
+ // NewRoot is the last instruction in the \p InsInstrs vector.
+ // Get depth and latency of NewRoot.
unsigned NewRootIdx = InsInstrs.size() - 1;
MachineInstr *NewRoot = InsInstrs[NewRootIdx];
unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
unsigned NewRootLatency = getLatency(Root, NewRoot, BlockTrace);
- // Get depth, latency and slack of Root
+ // Get depth, latency and slack of Root.
unsigned RootDepth = BlockTrace.getInstrCycles(Root).Depth;
unsigned RootLatency = TSchedModel.computeInstrLatency(Root);
unsigned RootSlack = BlockTrace.getInstrSlack(Root);
@@ -245,7 +245,7 @@ bool MachineCombiner::preservesCriticalPathLen(
dbgs() << " RootDepth + RootLatency + RootSlack "
<< RootDepth + RootLatency + RootSlack << "\n";);
- /// True when the new sequence does not lenghten the critical path.
+ /// True when the new sequence does not lengthen the critical path.
return ((NewRootDepth + NewRootLatency) <=
(RootDepth + RootLatency + RootSlack));
}
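Plugging hypothetical numbers into the acceptance test above makes the inequality concrete; the names mirror the local variables in preservesCriticalPathLen():

  unsigned NewRootDepth = 3, NewRootLatency = 2;          // new sequence: 3 + 2 = 5
  unsigned RootDepth = 2, RootLatency = 2, RootSlack = 2; // old budget: 2 + 2 + 2 = 6
  bool Accept = (NewRootDepth + NewRootLatency) <=
                (RootDepth + RootLatency + RootSlack);    // 5 <= 6, combine accepted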
@@ -284,7 +284,7 @@ bool MachineCombiner::preservesResourceLen(
ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);
- // Compute new resource length
+ // Compute new resource length.
unsigned ResLenAfterCombine =
BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);
@@ -322,7 +322,7 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
auto &MI = *BlockIter++;
DEBUG(dbgs() << "INSTR "; MI.dump(); dbgs() << "\n";);
- SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Pattern;
+ SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Patterns;
// The motivating example is:
//
// MUL Other MUL_op1 MUL_op2 Other
@@ -345,11 +345,11 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
//
// The algorithm does not try to evaluate all patterns and pick the best.
// This is only an artificial restriction though. In practice there is
- // mostly one pattern and hasPattern() can order patterns based on an
- // internal cost heuristic.
+ // mostly one pattern, and getMachineCombinerPatterns() can order patterns
+ // based on an internal cost heuristic.
- if (TII->hasPattern(MI, Pattern)) {
- for (auto P : Pattern) {
+ if (TII->getMachineCombinerPatterns(MI, Patterns)) {
+ for (auto P : Patterns) {
SmallVector<MachineInstr *, 16> InsInstrs;
SmallVector<MachineInstr *, 16> DelInstrs;
DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
@@ -373,8 +373,7 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
InstrIdxForVirtReg) &&
preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs))) {
for (auto *InstrPtr : InsInstrs)
- MBB->insert((MachineBasicBlock::iterator) & MI,
- (MachineInstr *)InstrPtr);
+ MBB->insert((MachineBasicBlock::iterator) &MI, InstrPtr);
for (auto *InstrPtr : DelInstrs)
InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();
@@ -383,15 +382,14 @@ bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
Traces->invalidate(MBB);
Traces->verifyAnalysis();
- // Eagerly stop after the first pattern fired
+ // Eagerly stop after the first pattern fires.
break;
} else {
// Cleanup instructions of the alternative code sequence. There is no
// use for them.
- for (auto *InstrPtr : InsInstrs) {
- MachineFunction *MF = MBB->getParent();
- MF->DeleteMachineInstr((MachineInstr *)InstrPtr);
- }
+ MachineFunction *MF = MBB->getParent();
+ for (auto *InstrPtr : InsInstrs)
+ MF->DeleteMachineInstr(InstrPtr);
}
InstrIdxForVirtReg.clear();
}
diff --git a/lib/CodeGen/MachineCopyPropagation.cpp b/lib/CodeGen/MachineCopyPropagation.cpp
index a6863412132b..ec171b0cae0c 100644
--- a/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/lib/CodeGen/MachineCopyPropagation.cpp
@@ -55,7 +55,7 @@ namespace {
DenseMap<unsigned, MachineInstr*> &AvailCopyMap);
bool CopyPropagateBlock(MachineBasicBlock &MBB);
};
-}
+} // namespace
char MachineCopyPropagation::ID = 0;
char &llvm::MachineCopyPropagationID = MachineCopyPropagation::ID;
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index 09662b6e48d3..67b9d77697e9 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -19,6 +19,7 @@
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunctionInitializer.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
@@ -41,11 +42,13 @@ using namespace llvm;
#define DEBUG_TYPE "codegen"
+void MachineFunctionInitializer::anchor() {}
+
//===----------------------------------------------------------------------===//
// MachineFunction implementation
//===----------------------------------------------------------------------===//
-// Out of line virtual method.
+// Out-of-line virtual method.
MachineFunctionInfo::~MachineFunctionInfo() {}
void ilist_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
@@ -114,8 +117,8 @@ MachineFunction::~MachineFunction() {
}
}
-/// getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it
-/// does already exist, allocate one.
+/// Get the JumpTableInfo for this function.
+/// If it does not already exist, allocate one.
MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
if (JumpTableInfo) return JumpTableInfo;
@@ -130,11 +133,10 @@ bool MachineFunction::shouldSplitStack() {
return getFunction()->hasFnAttribute("split-stack");
}
-/// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
-/// recomputes them. This guarantees that the MBB numbers are sequential,
-/// dense, and match the ordering of the blocks within the function. If a
-/// specific MachineBasicBlock is specified, only that block and those after
-/// it are renumbered.
+/// This discards all of the MachineBasicBlock numbers and recomputes them.
+/// This guarantees that the MBB numbers are sequential, dense, and match the
+/// ordering of the blocks within the function. If a specific MachineBasicBlock
+/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
if (empty()) { MBBNumbering.clear(); return; }
MachineFunction::iterator MBBI, E = end();
@@ -172,9 +174,7 @@ void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
MBBNumbering.resize(BlockNo);
}
-/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
-/// of `new MachineInstr'.
-///
+/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
MachineInstr *
MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
DebugLoc DL, bool NoImp) {
@@ -182,17 +182,15 @@ MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
MachineInstr(*this, MCID, DL, NoImp);
}
-/// CloneMachineInstr - Create a new MachineInstr which is a copy of the
-/// 'Orig' instruction, identical in all ways except the instruction
-/// has no parent, prev, or next.
-///
+/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
+/// identical in all ways except the instruction has no parent, prev, or next.
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
MachineInstr(*this, *Orig);
}
-/// DeleteMachineInstr - Delete the given MachineInstr.
+/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
@@ -208,17 +206,15 @@ MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
InstructionRecycler.Deallocate(Allocator, MI);
}
-/// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
-/// instead of `new MachineBasicBlock'.
-///
+/// Allocate a new MachineBasicBlock. Use this instead of
+/// `new MachineBasicBlock'.
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
MachineBasicBlock(*this, bb);
}
-/// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
-///
+/// Delete the given MachineBasicBlock.
void
MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
assert(MBB->getParent() == this && "MBB parent mismatch!");
@@ -408,7 +404,7 @@ namespace llvm {
return OutStr;
}
};
-}
+} // namespace llvm
void MachineFunction::viewCFG() const
{
@@ -430,7 +426,7 @@ void MachineFunction::viewCFGOnly() const
#endif // NDEBUG
}
-/// addLiveIn - Add the specified physical register as a live-in value and
+/// Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
unsigned MachineFunction::addLiveIn(unsigned PReg,
const TargetRegisterClass *RC) {
@@ -454,7 +450,7 @@ unsigned MachineFunction::addLiveIn(unsigned PReg,
return VReg;
}
-/// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
+/// Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
@@ -471,8 +467,7 @@ MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
return Ctx.getOrCreateSymbol(Name);
}
-/// getPICBaseSymbol - Return a function-local symbol to represent the PIC
-/// base.
+/// Return a function-local symbol to represent the PIC base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
const DataLayout *DL = getTarget().getDataLayout();
return Ctx.getOrCreateSymbol(Twine(DL->getPrivateGlobalPrefix())+
@@ -483,8 +478,7 @@ MCSymbol *MachineFunction::getPICBaseSymbol() const {
// MachineFrameInfo implementation
//===----------------------------------------------------------------------===//
-/// ensureMaxAlignment - Make sure the function is at least Align bytes
-/// aligned.
+/// Make sure the function is at least Align bytes aligned.
void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
if (!StackRealignable || !RealignOption)
assert(Align <= StackAlignment &&
@@ -492,7 +486,7 @@ void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
if (MaxAlignment < Align) MaxAlignment = Align;
}
-/// clampStackAlignment - Clamp the alignment if requested and emit a warning.
+/// Clamp the alignment if requested and emit a warning.
static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
unsigned StackAlign) {
if (!ShouldClamp || Align <= StackAlign)
@@ -503,9 +497,8 @@ static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
return StackAlign;
}
-/// CreateStackObject - Create a new statically sized stack object, returning
-/// a nonnegative identifier to represent it.
-///
+/// Create a new statically sized stack object, returning a nonnegative
+/// identifier to represent it.
int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
bool isSS, const AllocaInst *Alloca) {
assert(Size != 0 && "Cannot allocate zero size stack objects!");
@@ -519,10 +512,8 @@ int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
return Index;
}
-/// CreateSpillStackObject - Create a new statically sized stack object that
-/// represents a spill slot, returning a nonnegative identifier to represent
-/// it.
-///
+/// Create a new statically sized stack object that represents a spill slot,
+/// returning a nonnegative identifier to represent it.
int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
unsigned Alignment) {
Alignment = clampStackAlignment(!StackRealignable || !RealignOption,
@@ -533,11 +524,9 @@ int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
return Index;
}
-/// CreateVariableSizedObject - Notify the MachineFrameInfo object that a
-/// variable sized object has been created. This must be created whenever a
-/// variable sized object is created, whether or not the index returned is
-/// actually used.
-///
+/// Notify the MachineFrameInfo object that a variable sized object has been
+/// created. This must be created whenever a variable sized object is created,
+/// whether or not the index returned is actually used.
int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
const AllocaInst *Alloca) {
HasVarSizedObjects = true;
@@ -548,11 +537,10 @@ int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
return (int)Objects.size()-NumFixedObjects-1;
}
-/// CreateFixedObject - Create a new object at a fixed location on the stack.
+/// Create a new object at a fixed location on the stack.
/// All fixed objects should be created before other objects are created for
/// efficiency. By default, fixed objects are immutable. This returns an
/// index with a negative value.
-///
int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
bool Immutable, bool isAliased) {
assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
@@ -569,8 +557,8 @@ int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
return -++NumFixedObjects;
}
-/// CreateFixedSpillStackObject - Create a spill slot at a fixed location
-/// on the stack. Returns an index with a negative value.
+/// Create a spill slot at a fixed location on the stack.
+/// Returns an index with a negative value.
int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
int64_t SPOffset) {
unsigned Align = MinAlign(SPOffset, StackAlignment);
@@ -700,7 +688,7 @@ void MachineFrameInfo::dump(const MachineFunction &MF) const {
// MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//
-/// getEntrySize - Return the size of each entry in the jump table.
+/// Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
// The size of a jump table entry is 4 bytes unless the entry is just the
// address of a block, in which case it is the pointer size.
@@ -719,7 +707,7 @@ unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
llvm_unreachable("Unknown jump table encoding!");
}
-/// getEntryAlignment - Return the alignment of each entry in the jump table.
+/// Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
// The alignment of a jump table entry is the alignment of int32 unless the
// entry is just the address of a block, in which case it is the pointer
@@ -739,8 +727,7 @@ unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
llvm_unreachable("Unknown jump table encoding!");
}
-/// createJumpTableIndex - Create a new jump table entry in the jump table info.
-///
+/// Create a new jump table entry in the jump table info.
unsigned MachineJumpTableInfo::createJumpTableIndex(
const std::vector<MachineBasicBlock*> &DestBBs) {
assert(!DestBBs.empty() && "Cannot create an empty jump table!");
@@ -748,8 +735,8 @@ unsigned MachineJumpTableInfo::createJumpTableIndex(
return JumpTables.size()-1;
}
-/// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
-/// the jump tables to branch to New instead.
+/// If Old is the target of any jump tables, update the jump tables to branch
+/// to New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
MachineBasicBlock *New) {
assert(Old != New && "Not making a change?");
@@ -759,8 +746,8 @@ bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
return MadeChange;
}
-/// ReplaceMBBInJumpTable - If Old is a target of the jump tables, update
-/// the jump table to branch to New instead.
+/// If Old is a target of the jump tables, update the jump table to branch to
+/// New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
MachineBasicBlock *Old,
MachineBasicBlock *New) {
@@ -858,8 +845,8 @@ MachineConstantPool::~MachineConstantPool() {
delete *I;
}
-/// CanShareConstantPoolEntry - Test whether the given two constants
-/// can be allocated the same constant pool entry.
+/// Test whether the given two constants can be allocated the same constant pool
+/// entry.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
const DataLayout *TD) {
// Handle the trivial case quickly.
@@ -901,10 +888,8 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
return A == B;
}
-/// getConstantPoolIndex - Create a new entry in the constant pool or return
-/// an existing one. User must specify the log2 of the minimum required
-/// alignment for the object.
-///
+/// Create a new entry in the constant pool or return an existing one.
+/// User must specify the log2 of the minimum required alignment for the object.
unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
unsigned Alignment) {
assert(Alignment && "Alignment must be specified!");
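Taken together, the reworded comments above encode a sign convention for frame indices. A short sketch with illustrative argument values, using the signatures shown in this diff:

  // Ordinary and spill stack objects get nonnegative identifiers...
  int FI = MFI->CreateStackObject(/*Size=*/8, /*Alignment=*/8,
                                  /*isSS=*/false, /*Alloca=*/nullptr);
  // ...while fixed objects (e.g. incoming stack arguments) are negative.
  int FixedFI = MFI->CreateFixedObject(/*Size=*/4, /*SPOffset=*/16,
                                       /*Immutable=*/true,
                                       /*isAliased=*/false);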
diff --git a/lib/CodeGen/MachineFunctionAnalysis.cpp b/lib/CodeGen/MachineFunctionAnalysis.cpp
index f6f34ba9d927..338cd1e22032 100644
--- a/lib/CodeGen/MachineFunctionAnalysis.cpp
+++ b/lib/CodeGen/MachineFunctionAnalysis.cpp
@@ -15,12 +15,14 @@
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineFunctionInitializer.h"
using namespace llvm;
char MachineFunctionAnalysis::ID = 0;
-MachineFunctionAnalysis::MachineFunctionAnalysis(const TargetMachine &tm) :
- FunctionPass(ID), TM(tm), MF(nullptr) {
+MachineFunctionAnalysis::MachineFunctionAnalysis(
+ const TargetMachine &tm, MachineFunctionInitializer *MFInitializer)
+ : FunctionPass(ID), TM(tm), MF(nullptr), MFInitializer(MFInitializer) {
initializeMachineModuleInfoPass(*PassRegistry::getPassRegistry());
}
@@ -47,6 +49,8 @@ bool MachineFunctionAnalysis::runOnFunction(Function &F) {
assert(!MF && "MachineFunctionAnalysis already initialized!");
MF = new MachineFunction(&F, TM, NextFnNum++,
getAnalysis<MachineModuleInfo>());
+ if (MFInitializer)
+ MFInitializer->initializeMachineFunction(*MF);
return false;
}
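A minimal initializer implementing the new hook could look like the sketch below. Only the MachineFunctionInitializer name and the initializeMachineFunction call site appear in this patch; the exact signature, including the bool return, is an assumption:

  class PrintingMFInitializer : public MachineFunctionInitializer {
  public:
    // Runs once per function, immediately after the MachineFunction is
    // created and before any machine pass sees it.
    bool initializeMachineFunction(MachineFunction &MF) override {
      errs() << "initializing " << MF.getName() << "\n";
      return false; // made no changes
    }
  };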
diff --git a/lib/CodeGen/MachineFunctionPrinterPass.cpp b/lib/CodeGen/MachineFunctionPrinterPass.cpp
index 790f5accdb26..57b7230e6cd5 100644
--- a/lib/CodeGen/MachineFunctionPrinterPass.cpp
+++ b/lib/CodeGen/MachineFunctionPrinterPass.cpp
@@ -49,7 +49,7 @@ struct MachineFunctionPrinterPass : public MachineFunctionPass {
};
char MachineFunctionPrinterPass::ID = 0;
-}
+} // namespace
char &llvm::MachineFunctionPrinterPassID = MachineFunctionPrinterPass::ID;
INITIALIZE_PASS(MachineFunctionPrinterPass, "machineinstr-printer",
diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp
index e67102865bfa..19ba5cfd34b0 100644
--- a/lib/CodeGen/MachineInstr.cpp
+++ b/lib/CodeGen/MachineInstr.cpp
@@ -1450,9 +1450,9 @@ bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
if (const Value *V = (*I)->getValue()) {
// If we have an AliasAnalysis, ask it whether the memory is constant.
- if (AA && AA->pointsToConstantMemory(
- AliasAnalysis::Location(V, (*I)->getSize(),
- (*I)->getAAInfo())))
+ if (AA &&
+ AA->pointsToConstantMemory(
+ MemoryLocation(V, (*I)->getSize(), (*I)->getAAInfo())))
continue;
}
diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp
index cce590c6dc5b..e9ea5ed9648c 100644
--- a/lib/CodeGen/MachineLICM.cpp
+++ b/lib/CodeGen/MachineLICM.cpp
@@ -27,7 +27,7 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -74,7 +74,7 @@ namespace {
const TargetRegisterInfo *TRI;
const MachineFrameInfo *MFI;
MachineRegisterInfo *MRI;
- const InstrItineraryData *InstrItins;
+ TargetSchedModel SchedModel;
bool PreRegAlloc;
// Various analyses that we use...
@@ -338,12 +338,13 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
return false;
Changed = FirstInLoop = false;
- TII = MF.getSubtarget().getInstrInfo();
- TLI = MF.getSubtarget().getTargetLowering();
- TRI = MF.getSubtarget().getRegisterInfo();
+ const TargetSubtargetInfo &ST = MF.getSubtarget();
+ TII = ST.getInstrInfo();
+ TLI = ST.getTargetLowering();
+ TRI = ST.getRegisterInfo();
MFI = MF.getFrameInfo();
MRI = &MF.getRegInfo();
- InstrItins = MF.getSubtarget().getInstrItineraryData();
+ SchedModel.init(ST.getSchedModel(), &ST, TII);
PreRegAlloc = MRI->isSSA();
@@ -1046,7 +1047,7 @@ bool MachineLICM::HasLoopPHIUse(const MachineInstr *MI) const {
/// it 'high'.
bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
unsigned DefIdx, unsigned Reg) const {
- if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg))
+ if (MRI->use_nodbg_empty(Reg))
return false;
for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {
@@ -1062,7 +1063,7 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
if (MOReg != Reg)
continue;
- if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, &UseMI, i))
+ if (TII->hasHighOperandLatency(SchedModel, MRI, &MI, DefIdx, &UseMI, i))
return true;
}
@@ -1078,8 +1079,6 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
if (TII->isAsCheapAsAMove(&MI) || MI.isCopyLike())
return true;
- if (!InstrItins || InstrItins->isEmpty())
- return false;
bool isCheap = false;
unsigned NumDefs = MI.getDesc().getNumDefs();
@@ -1092,7 +1091,7 @@ bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
if (TargetRegisterInfo::isPhysicalRegister(Reg))
continue;
- if (!TII->hasLowDefLatency(InstrItins, &MI, i))
+ if (!TII->hasLowDefLatency(SchedModel, &MI, i))
return false;
isCheap = true;
}
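The switch from raw itineraries to TargetSchedModel lets the two hooks above answer latency questions uniformly, whether the subtarget provides an itinerary or a per-operand machine model. A sketch of the kind of query this enables, mirroring the init call in runOnMachineFunction:

  TargetSchedModel SchedModel;
  SchedModel.init(ST.getSchedModel(), &ST, TII);
  // Falls back to MCSchedModel defaults when no itinerary exists, instead
  // of bailing out the way the old InstrItins->isEmpty() guards did.
  unsigned Latency = SchedModel.computeInstrLatency(&MI);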
diff --git a/lib/CodeGen/MachineModuleInfo.cpp b/lib/CodeGen/MachineModuleInfo.cpp
index eec984f53b90..a303426b420c 100644
--- a/lib/CodeGen/MachineModuleInfo.cpp
+++ b/lib/CodeGen/MachineModuleInfo.cpp
@@ -97,7 +97,7 @@ public:
void UpdateForDeletedBlock(BasicBlock *BB);
void UpdateForRAUWBlock(BasicBlock *Old, BasicBlock *New);
};
-}
+} // namespace llvm
MCSymbol *MMIAddrLabelMap::getAddrLabelSymbol(BasicBlock *BB) {
assert(BB->hasAddressTaken() &&
@@ -318,23 +318,6 @@ void MachineModuleInfo::EndFunction() {
VariableDbgInfos.clear();
}
-/// AnalyzeModule - Scan the module for global debug information.
-///
-void MachineModuleInfo::AnalyzeModule(const Module &M) {
- // Insert functions in the llvm.used array (but not llvm.compiler.used) into
- // UsedFunctions.
- const GlobalVariable *GV = M.getGlobalVariable("llvm.used");
- if (!GV || !GV->hasInitializer()) return;
-
- // Should be an array of 'i8*'.
- const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());
-
- for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
- if (const Function *F =
- dyn_cast<Function>(InitList->getOperand(i)->stripPointerCasts()))
- UsedFunctions.insert(F);
-}
-
//===- Address of Block Management ----------------------------------------===//
diff --git a/lib/CodeGen/MachineSSAUpdater.cpp b/lib/CodeGen/MachineSSAUpdater.cpp
index 71a6ebaba243..fd1bf31aa3e5 100644
--- a/lib/CodeGen/MachineSSAUpdater.cpp
+++ b/lib/CodeGen/MachineSSAUpdater.cpp
@@ -340,7 +340,7 @@ public:
}
};
-} // End llvm namespace
+} // namespace llvm
/// GetValueAtEndOfBlockInternal - Check to see if AvailableVals has an entry
/// for the specified BB and if so, return it. If not, construct SSA form by
diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp
index 44107d6ad16b..dd7654b1e556 100644
--- a/lib/CodeGen/MachineScheduler.cpp
+++ b/lib/CodeGen/MachineScheduler.cpp
@@ -347,7 +347,7 @@ bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
if (skipOptnoneFunction(*mf.getFunction()))
return false;
- if (!mf.getSubtarget().enablePostMachineScheduler()) {
+ if (!mf.getSubtarget().enablePostRAScheduler()) {
DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
return false;
}
@@ -1262,7 +1262,7 @@ public:
protected:
void clusterNeighboringLoads(ArrayRef<SUnit*> Loads, ScheduleDAGMI *DAG);
};
-} // anonymous
+} // namespace
void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
ScheduleDAGMI *DAG) {
@@ -1271,7 +1271,7 @@ void LoadClusterMutation::clusterNeighboringLoads(ArrayRef<SUnit*> Loads,
SUnit *SU = Loads[Idx];
unsigned BaseReg;
unsigned Offset;
- if (TII->getLdStBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
+ if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
LoadRecords.push_back(LoadInfo(SU, BaseReg, Offset));
}
if (LoadRecords.size() < 2)
@@ -1355,7 +1355,7 @@ public:
void apply(ScheduleDAGMI *DAG) override;
};
-} // anonymous
+} // namespace
/// \brief Callback from DAG postProcessing to create cluster edges to encourage
/// fused operations.
@@ -1407,7 +1407,7 @@ public:
protected:
void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};
-} // anonymous
+} // namespace
/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
@@ -2150,7 +2150,7 @@ void GenericSchedulerBase::setPolicy(CandPolicy &Policy,
bool IsPostRA,
SchedBoundary &CurrZone,
SchedBoundary *OtherZone) {
- // Apply preemptive heuristics based on the the total latency and resources
+ // Apply preemptive heuristics based on the total latency and resources
// inside and outside this zone. Potential stalls should be considered before
// following this policy.
diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp
index aed0e500d441..1b9be50068a9 100644
--- a/lib/CodeGen/MachineSink.cpp
+++ b/lib/CodeGen/MachineSink.cpp
@@ -73,6 +73,9 @@ namespace {
SparseBitVector<> RegsToClearKillFlags;
+ typedef std::map<MachineBasicBlock *, SmallVector<MachineBasicBlock *, 4>>
+ AllSuccsCache;
+
public:
static char ID; // Pass identification
MachineSinking() : MachineFunctionPass(ID) {
@@ -120,18 +123,24 @@ namespace {
MachineBasicBlock *From,
MachineBasicBlock *To,
bool BreakPHIEdge);
- bool SinkInstruction(MachineInstr *MI, bool &SawStore);
+ bool SinkInstruction(MachineInstr *MI, bool &SawStore,
+ AllSuccsCache &AllSuccessors);
bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
MachineBasicBlock *DefMBB,
bool &BreakPHIEdge, bool &LocalUse) const;
MachineBasicBlock *FindSuccToSinkTo(MachineInstr *MI, MachineBasicBlock *MBB,
- bool &BreakPHIEdge);
+ bool &BreakPHIEdge, AllSuccsCache &AllSuccessors);
bool isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
MachineBasicBlock *MBB,
- MachineBasicBlock *SuccToSinkTo);
+ MachineBasicBlock *SuccToSinkTo,
+ AllSuccsCache &AllSuccessors);
bool PerformTrivialForwardCoalescing(MachineInstr *MI,
MachineBasicBlock *MBB);
+
+ SmallVector<MachineBasicBlock *, 4> &
+ GetAllSortedSuccessors(MachineInstr *MI, MachineBasicBlock *MBB,
+ AllSuccsCache &AllSuccessors) const;
};
} // end anonymous namespace
@@ -269,9 +278,8 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
// Process all basic blocks.
CEBCandidates.clear();
ToSplit.clear();
- for (MachineFunction::iterator I = MF.begin(), E = MF.end();
- I != E; ++I)
- MadeChange |= ProcessBlock(*I);
+ for (auto &MBB: MF)
+ MadeChange |= ProcessBlock(MBB);
// If we have anything we marked as toSplit, split it now.
for (auto &Pair : ToSplit) {
@@ -310,6 +318,9 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
bool MadeChange = false;
+ // Cache all successors, sorted by frequency info and loop depth.
+ AllSuccsCache AllSuccessors;
+
// Walk the basic block bottom-up. Remember if we saw a store.
MachineBasicBlock::iterator I = MBB.end();
--I;
@@ -332,7 +343,7 @@ bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
continue;
}
- if (SinkInstruction(MI, SawStore))
+ if (SinkInstruction(MI, SawStore, AllSuccessors))
++NumSunk, MadeChange = true;
// If we just processed the first instruction in the block, we're done.
@@ -484,7 +495,8 @@ static void collectDebugValues(MachineInstr *MI,
/// isProfitableToSinkTo - Return true if it is profitable to sink MI.
bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
MachineBasicBlock *MBB,
- MachineBasicBlock *SuccToSinkTo) {
+ MachineBasicBlock *SuccToSinkTo,
+ AllSuccsCache &AllSuccessors) {
assert (MI && "Invalid MachineInstr!");
assert (SuccToSinkTo && "Invalid SinkTo Candidate BB");
@@ -514,18 +526,66 @@ bool MachineSinking::isProfitableToSinkTo(unsigned Reg, MachineInstr *MI,
// can be further profitably sunk into another block in the next round.
bool BreakPHIEdge = false;
// FIXME - If finding a successor is compile-time expensive, then cache the results.
- if (MachineBasicBlock *MBB2 = FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge))
- return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2);
+ if (MachineBasicBlock *MBB2 =
+ FindSuccToSinkTo(MI, SuccToSinkTo, BreakPHIEdge, AllSuccessors))
+ return isProfitableToSinkTo(Reg, MI, SuccToSinkTo, MBB2, AllSuccessors);
// If SuccToSinkTo is the final destination and it is a post-dominator of the
// current block, then it is not profitable to sink MI into SuccToSinkTo.
return false;
}
+/// Get the sorted sequence of successors for this MachineBasicBlock, possibly
+/// computing it if it was not already cached.
+SmallVector<MachineBasicBlock *, 4> &
+MachineSinking::GetAllSortedSuccessors(MachineInstr *MI, MachineBasicBlock *MBB,
+ AllSuccsCache &AllSuccessors) const {
+
+ // Do we have the sorted successors in the cache?
+ auto Succs = AllSuccessors.find(MBB);
+ if (Succs != AllSuccessors.end())
+ return Succs->second;
+
+ SmallVector<MachineBasicBlock *, 4> AllSuccs(MBB->succ_begin(),
+ MBB->succ_end());
+
+ // Handle cases where sinking can happen but where the sink point isn't a
+ // successor. For example:
+ //
+ // x = computation
+ // if () {} else {}
+ // use x
+ //
+ const std::vector<MachineDomTreeNode *> &Children =
+ DT->getNode(MBB)->getChildren();
+ for (const auto &DTChild : Children)
+ // DomTree children of MBB that have MBB as immediate dominator are added.
+ if (DTChild->getIDom()->getBlock() == MI->getParent() &&
+ // Skip MBBs already added to the AllSuccs vector above.
+ !MBB->isSuccessor(DTChild->getBlock()))
+ AllSuccs.push_back(DTChild->getBlock());
+
+ // Sort successors by block frequency info when available, otherwise by loop depth.
+ std::stable_sort(
+ AllSuccs.begin(), AllSuccs.end(),
+ [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
+ uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
+ uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
+ bool HasBlockFreq = LHSFreq != 0 && RHSFreq != 0;
+ return HasBlockFreq ? LHSFreq < RHSFreq
+ : LI->getLoopDepth(L) < LI->getLoopDepth(R);
+ });
+
+ auto it = AllSuccessors.insert(std::make_pair(MBB, AllSuccs));
+
+ return it.first->second;
+}
+
/// FindSuccToSinkTo - Find a successor to sink this instruction to.
MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
MachineBasicBlock *MBB,
- bool &BreakPHIEdge) {
+ bool &BreakPHIEdge,
+ AllSuccsCache &AllSuccessors) {
assert (MI && "Invalid MachineInstr!");
assert (MBB && "Invalid MachineBasicBlock!");
@@ -579,38 +639,8 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
// we should sink to. If we have reliable block frequency information
// (frequency != 0) available, give successors with smaller frequencies
// higher priority, otherwise prioritize smaller loop depths.
- SmallVector<MachineBasicBlock*, 4> Succs(MBB->succ_begin(),
- MBB->succ_end());
-
- // Handle cases where sinking can happen but where the sink point isn't a
- // successor. For example:
- //
- // x = computation
- // if () {} else {}
- // use x
- //
- const std::vector<MachineDomTreeNode *> &Children =
- DT->getNode(MBB)->getChildren();
- for (const auto &DTChild : Children)
- // DomTree children of MBB that have MBB as immediate dominator are added.
- if (DTChild->getIDom()->getBlock() == MI->getParent() &&
- // Skip MBBs already added to the Succs vector above.
- !MBB->isSuccessor(DTChild->getBlock()))
- Succs.push_back(DTChild->getBlock());
-
- // Sort Successors according to their loop depth or block frequency info.
- std::stable_sort(
- Succs.begin(), Succs.end(),
- [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
- uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
- uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
- bool HasBlockFreq = LHSFreq != 0 && RHSFreq != 0;
- return HasBlockFreq ? LHSFreq < RHSFreq
- : LI->getLoopDepth(L) < LI->getLoopDepth(R);
- });
- for (SmallVectorImpl<MachineBasicBlock *>::iterator SI = Succs.begin(),
- E = Succs.end(); SI != E; ++SI) {
- MachineBasicBlock *SuccBlock = *SI;
+ for (MachineBasicBlock *SuccBlock :
+ GetAllSortedSuccessors(MI, MBB, AllSuccessors)) {
bool LocalUse = false;
if (AllUsesDominatedByBlock(Reg, SuccBlock, MBB,
BreakPHIEdge, LocalUse)) {
@@ -625,7 +655,7 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
// If we couldn't find a block to sink to, ignore this instruction.
if (!SuccToSinkTo)
return nullptr;
- if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo))
+ if (!isProfitableToSinkTo(Reg, MI, MBB, SuccToSinkTo, AllSuccessors))
return nullptr;
}
}
@@ -645,7 +675,8 @@ MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
-bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
+bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore,
+ AllSuccsCache &AllSuccessors) {
// Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
// be close to the source to make it easier to coalesce.
if (AvoidsSinking(MI, MRI))
@@ -669,8 +700,8 @@ bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
bool BreakPHIEdge = false;
MachineBasicBlock *ParentBlock = MI->getParent();
- MachineBasicBlock *SuccToSinkTo = FindSuccToSinkTo(MI, ParentBlock,
- BreakPHIEdge);
+ MachineBasicBlock *SuccToSinkTo =
+ FindSuccToSinkTo(MI, ParentBlock, BreakPHIEdge, AllSuccessors);
// If there are no outputs, it must have side-effects.
if (!SuccToSinkTo)
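The dominator-tree walk now cached by GetAllSortedSuccessors exists for sink points that are not CFG successors. In source form, the case sketched in the comment looks like this (illustrative):

  int x = compute();   // candidate to sink out of the defining block
  if (cond) { f(); } else { g(); }
  use(x);              // this block is a dom-tree child of the defining
                       // block, but not one of its CFG successors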
diff --git a/lib/CodeGen/MachineTraceMetrics.cpp b/lib/CodeGen/MachineTraceMetrics.cpp
index 34ac9d5b0ed7..7704d1498be0 100644
--- a/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/lib/CodeGen/MachineTraceMetrics.cpp
@@ -306,7 +306,7 @@ public:
MinInstrCountEnsemble(MachineTraceMetrics *mtm)
: MachineTraceMetrics::Ensemble(mtm) {}
};
-}
+} // namespace
// Select the preferred predecessor for MBB.
const MachineBasicBlock*
@@ -414,7 +414,7 @@ struct LoopBounds {
const MachineLoopInfo *loops)
: Blocks(blocks), Loops(loops), Downward(false) {}
};
-}
+} // namespace
// Specialize po_iterator_storage in order to prune the post-order traversal so
// it is limited to the current loop and doesn't traverse the loop back edges.
@@ -447,7 +447,7 @@ public:
return LB.Visited.insert(To).second;
}
};
-}
+} // namespace llvm
/// Compute the trace through MBB.
void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
@@ -619,7 +619,7 @@ struct DataDep {
assert((++DefI).atEnd() && "Register has multiple defs");
}
};
-}
+} // namespace
// Get the input data dependencies that must be ready before UseMI can issue.
// Return true if UseMI has any physreg operands.
@@ -681,7 +681,7 @@ struct LiveRegUnit {
LiveRegUnit(unsigned RU) : RegUnit(RU), Cycle(0), MI(nullptr), Op(0) {}
};
-}
+} // namespace
// Identify physreg dependencies for UseMI, and update the live regunit
// tracking set when scanning instructions downwards.
diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp
index ca35ec5fdcf8..72a67690614c 100644
--- a/lib/CodeGen/MachineVerifier.cpp
+++ b/lib/CodeGen/MachineVerifier.cpp
@@ -258,7 +258,7 @@ namespace {
}
};
-}
+} // namespace
char MachineVerifierPass::ID = 0;
INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
@@ -1710,7 +1710,7 @@ namespace {
bool EntryIsSetup;
bool ExitIsSetup;
};
-}
+} // namespace
/// Make sure on every path through the CFG, a FrameSetup <n> is always followed
/// by a FrameDestroy <n>, stack adjustments are identical on all
diff --git a/lib/CodeGen/OptimizePHIs.cpp b/lib/CodeGen/OptimizePHIs.cpp
index a1042e720c37..9780d75eb0d2 100644
--- a/lib/CodeGen/OptimizePHIs.cpp
+++ b/lib/CodeGen/OptimizePHIs.cpp
@@ -55,7 +55,7 @@ namespace {
bool IsDeadPHICycle(MachineInstr *MI, InstrSet &PHIsInCycle);
bool OptimizeBB(MachineBasicBlock &MBB);
};
-}
+} // namespace
char OptimizePHIs::ID = 0;
char &llvm::OptimizePHIsID = OptimizePHIs::ID;
diff --git a/lib/CodeGen/PHIElimination.cpp b/lib/CodeGen/PHIElimination.cpp
index d51419083d06..471c78a6d8ca 100644
--- a/lib/CodeGen/PHIElimination.cpp
+++ b/lib/CodeGen/PHIElimination.cpp
@@ -88,8 +88,8 @@ namespace {
// These functions are temporary abstractions around LiveVariables and
// LiveIntervals, so they can go away when LiveVariables does.
- bool isLiveIn(unsigned Reg, MachineBasicBlock *MBB);
- bool isLiveOutPastPHIs(unsigned Reg, MachineBasicBlock *MBB);
+ bool isLiveIn(unsigned Reg, const MachineBasicBlock *MBB);
+ bool isLiveOutPastPHIs(unsigned Reg, const MachineBasicBlock *MBB);
typedef std::pair<unsigned, unsigned> BBVRegPair;
typedef DenseMap<BBVRegPair, unsigned> VRegPHIUse;
@@ -104,7 +104,7 @@ namespace {
MachineInstrExpressionTrait> LoweredPHIMap;
LoweredPHIMap LoweredPHIs;
};
-}
+} // namespace
STATISTIC(NumLowered, "Number of phis lowered");
STATISTIC(NumCriticalEdgesSplit, "Number of critical edges split");
@@ -143,16 +143,16 @@ bool PHIElimination::runOnMachineFunction(MachineFunction &MF) {
// updating LiveIntervals, so we disable it.
if (!DisableEdgeSplitting && (LV || LIS)) {
MachineLoopInfo *MLI = getAnalysisIfAvailable<MachineLoopInfo>();
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
- Changed |= SplitPHIEdges(MF, *I, MLI);
+ for (auto &MBB : MF)
+ Changed |= SplitPHIEdges(MF, MBB, MLI);
}
// Populate VRegPHIUseCount
analyzePHINodes(MF);
// Eliminate PHI instructions by inserting copies into predecessor blocks.
- for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
- Changed |= EliminatePHINodes(MF, *I);
+ for (auto &MBB : MF)
+ Changed |= EliminatePHINodes(MF, MBB);
// Remove dead IMPLICIT_DEF instructions.
for (MachineInstr *DefMI : ImpDefs) {
@@ -623,7 +623,7 @@ bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
return Changed;
}
-bool PHIElimination::isLiveIn(unsigned Reg, MachineBasicBlock *MBB) {
+bool PHIElimination::isLiveIn(unsigned Reg, const MachineBasicBlock *MBB) {
assert((LV || LIS) &&
"isLiveIn() requires either LiveVariables or LiveIntervals");
if (LIS)
@@ -632,7 +632,8 @@ bool PHIElimination::isLiveIn(unsigned Reg, MachineBasicBlock *MBB) {
return LV->isLiveIn(Reg, *MBB);
}
-bool PHIElimination::isLiveOutPastPHIs(unsigned Reg, MachineBasicBlock *MBB) {
+bool PHIElimination::isLiveOutPastPHIs(unsigned Reg,
+ const MachineBasicBlock *MBB) {
assert((LV || LIS) &&
"isLiveOutPastPHIs() requires either LiveVariables or LiveIntervals");
// LiveVariables considers uses in PHIs to be in the predecessor basic block,
@@ -642,11 +643,9 @@ bool PHIElimination::isLiveOutPastPHIs(unsigned Reg, MachineBasicBlock *MBB) {
// out of the block.
if (LIS) {
const LiveInterval &LI = LIS->getInterval(Reg);
- for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
- SE = MBB->succ_end(); SI != SE; ++SI) {
- if (LI.liveAt(LIS->getMBBStartIdx(*SI)))
+ for (const MachineBasicBlock *SI : MBB->successors())
+ if (LI.liveAt(LIS->getMBBStartIdx(SI)))
return true;
- }
return false;
} else {
return LV->isLiveOut(Reg, *MBB);
diff --git a/lib/CodeGen/Passes.cpp b/lib/CodeGen/Passes.cpp
index 4cd86e66c0e8..210a7a1649cd 100644
--- a/lib/CodeGen/Passes.cpp
+++ b/lib/CodeGen/Passes.cpp
@@ -24,6 +24,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
@@ -72,6 +73,10 @@ static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
cl::desc("Disable Copy Propagation pass"));
static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));
+static cl::opt<bool> EnableImplicitNullChecks(
+ "enable-implicit-null-checks",
+ cl::desc("Fold null checks into faulting memory operations"),
+ cl::init(false));
static cl::opt<bool> PrintLSR("print-lsr-output", cl::Hidden,
cl::desc("Print LLVM IR produced by the loop-reduce pass"));
static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
@@ -452,6 +457,9 @@ void TargetPassConfig::addCodeGenPrepare() {
void TargetPassConfig::addISelPrepare() {
addPreISel();
+ // Add both the safe stack and the stack protection passes: each of them will
+ // only protect functions that have corresponding attributes.
+ addPass(createSafeStackPass());
addPass(createStackProtectorPass(TM));
if (PrintISelInput)
@@ -543,6 +551,9 @@ void TargetPassConfig::addMachinePasses() {
// Run pre-sched2 passes.
addPreSched2();
+ if (EnableImplicitNullChecks)
+ addPass(&ImplicitNullChecksID);
+
// Second pass scheduler.
if (getOptLevel() != CodeGenOpt::None) {
if (MISchedPostRA)
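The new option defaults to off, so the ImplicitNullChecks pass only runs when requested; assuming the usual cl::opt plumbing, that means passing the string registered above to a codegen tool, e.g.:

  llc -enable-implicit-null-checks -O2 input.ll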
diff --git a/lib/CodeGen/PeepholeOptimizer.cpp b/lib/CodeGen/PeepholeOptimizer.cpp
index ebe05e3f2731..71c0a64325ba 100644
--- a/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/lib/CodeGen/PeepholeOptimizer.cpp
@@ -293,7 +293,7 @@ namespace {
/// register of the last source.
unsigned getReg() const { return Reg; }
};
-}
+} // namespace
char PeepholeOptimizer::ID = 0;
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
diff --git a/lib/CodeGen/PostRASchedulerList.cpp b/lib/CodeGen/PostRASchedulerList.cpp
index 55f08e496de1..6760b5f95097 100644
--- a/lib/CodeGen/PostRASchedulerList.cpp
+++ b/lib/CodeGen/PostRASchedulerList.cpp
@@ -184,7 +184,7 @@ namespace {
void dumpSchedule() const;
void emitNoop(unsigned CurCycle);
};
-}
+} // namespace
char &llvm::PostRASchedulerID = PostRAScheduler::ID;
@@ -257,7 +257,7 @@ bool PostRAScheduler::enablePostRAScheduler(
TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
Mode = ST.getAntiDepBreakMode();
ST.getCriticalPathRCs(CriticalPathRCs);
- return ST.enablePostMachineScheduler() &&
+ return ST.enablePostRAScheduler() &&
OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
}
diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp
index fd3d4d78968b..4a466381b9db 100644
--- a/lib/CodeGen/RegAllocFast.cpp
+++ b/lib/CodeGen/RegAllocFast.cpp
@@ -194,7 +194,7 @@ namespace {
bool setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg);
};
char RAFast::ID = 0;
-}
+} // namespace
/// getStackSpaceFor - This allocates space for the specified virtual register
/// to be held on the stack.
diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp
index e513a4f1ccf5..e2061fe1dbae 100644
--- a/lib/CodeGen/RegisterCoalescer.cpp
+++ b/lib/CodeGen/RegisterCoalescer.cpp
@@ -2633,7 +2633,8 @@ bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
// "overflow bit" 32. As a workaround we drop all subregister ranges
// which means we lose some precision but are back to a well-defined
// state.
- assert((CP.getNewRC()->getLaneMask() & 0x80000000u)
+ assert(TargetRegisterInfo::isImpreciseLaneMask(
+ CP.getNewRC()->getLaneMask())
&& "SubRange merge should only fail when merging into bit 32.");
DEBUG(dbgs() << "\tSubrange join aborted!\n");
LHS.clearSubRanges();
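Judging purely from the open-coded test this hunk replaces, the new helper presumably checks the high lane-mask bit. A sketch of that assumption, not taken from this patch:

  // Assumed shape of TargetRegisterInfo::isImpreciseLaneMask, inferred
  // from the 0x80000000u test it replaces above.
  static bool isImpreciseLaneMask(unsigned LaneMask) {
    return (LaneMask & 0x80000000u) != 0;
  }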
@@ -2696,7 +2697,7 @@ struct MBBPriorityInfo {
MBBPriorityInfo(MachineBasicBlock *mbb, unsigned depth, bool issplit)
: MBB(mbb), Depth(depth), IsSplit(issplit) {}
};
-}
+} // namespace
/// C-style comparator that sorts first based on the loop depth of the basic
/// block (the unsigned), and then on the MBB number.
diff --git a/lib/CodeGen/RegisterCoalescer.h b/lib/CodeGen/RegisterCoalescer.h
index 04067a1427af..4ba74417a16c 100644
--- a/lib/CodeGen/RegisterCoalescer.h
+++ b/lib/CodeGen/RegisterCoalescer.h
@@ -111,6 +111,6 @@ namespace llvm {
/// Return the register class of the coalesced register.
const TargetRegisterClass *getNewRC() const { return NewRC; }
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp
index a34bd6341d22..4176686d1f7f 100644
--- a/lib/CodeGen/RegisterScavenging.cpp
+++ b/lib/CodeGen/RegisterScavenging.cpp
@@ -103,10 +103,6 @@ void RegScavenger::determineKillsAndDefs() {
// Find out which registers are early clobbered, killed, defined, and marked
// def-dead in this instruction.
- // FIXME: The scavenger is not predication aware. If the instruction is
- // predicated, conservatively assume "kill" markers do not actually kill the
- // register. Similarly ignores "dead" markers.
- bool isPred = TII->isPredicated(MI);
KillRegUnits.reset();
DefRegUnits.reset();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -124,7 +120,7 @@ void RegScavenger::determineKillsAndDefs() {
}
// Apply the mask.
- (isPred ? DefRegUnits : KillRegUnits) |= TmpRegUnits;
+ KillRegUnits |= TmpRegUnits;
}
if (!MO.isReg())
continue;
@@ -136,11 +132,11 @@ void RegScavenger::determineKillsAndDefs() {
// Ignore undef uses.
if (MO.isUndef())
continue;
- if (!isPred && MO.isKill())
+ if (MO.isKill())
addRegUnits(KillRegUnits, Reg);
} else {
assert(MO.isDef());
- if (!isPred && MO.isDead())
+ if (MO.isDead())
addRegUnits(KillRegUnits, Reg);
else
addRegUnits(DefRegUnits, Reg);
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index e8e47b764dd2..ae4b935d719a 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -574,11 +574,11 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset;
int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;
- AliasAnalysis::AliasResult AAResult = AA->alias(
- AliasAnalysis::Location(MMOa->getValue(), Overlapa,
- UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
- AliasAnalysis::Location(MMOb->getValue(), Overlapb,
- UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
+ AliasAnalysis::AliasResult AAResult =
+ AA->alias(MemoryLocation(MMOa->getValue(), Overlapa,
+ UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
+ MemoryLocation(MMOb->getValue(), Overlapb,
+ UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
return (AAResult != AliasAnalysis::NoAlias);
}
@@ -1508,7 +1508,7 @@ public:
return getCurr()->Preds.end();
}
};
-} // anonymous
+} // namespace
static bool hasDataSucc(const SUnit *SU) {
for (SUnit::const_succ_iterator
diff --git a/lib/CodeGen/ScheduleDAGPrinter.cpp b/lib/CodeGen/ScheduleDAGPrinter.cpp
index b2e4617720ff..cdf27ae5fedd 100644
--- a/lib/CodeGen/ScheduleDAGPrinter.cpp
+++ b/lib/CodeGen/ScheduleDAGPrinter.cpp
@@ -72,7 +72,7 @@ namespace llvm {
return G->addCustomGraphFeatures(GW);
}
};
-}
+} // namespace llvm
std::string DOTGraphTraits<ScheduleDAG*>::getNodeLabel(const SUnit *SU,
const ScheduleDAG *G) {
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index a71c6761c75f..5fea52c97496 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -255,6 +255,7 @@ namespace {
SDValue visitSRA(SDNode *N);
SDValue visitSRL(SDNode *N);
SDValue visitRotate(SDNode *N);
+ SDValue visitBSWAP(SDNode *N);
SDValue visitCTLZ(SDNode *N);
SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
SDValue visitCTTZ(SDNode *N);
@@ -387,6 +388,13 @@ namespace {
unsigned SequenceNum;
};
+ /// This is a helper function for MergeStoresOfConstantsOrVecElts. Returns a
+ /// constant build_vector of the stored constant values in Stores.
+ SDValue getMergedConstantVectorStore(SelectionDAG &DAG,
+ SDLoc SL,
+ ArrayRef<MemOpLink> Stores,
+ EVT Ty) const;
+
/// This is a helper function for MergeConsecutiveStores. When the source
/// elements of the consecutive stores are all constants or all extracted
/// vector elements, try to merge them into one larger store.
@@ -395,6 +403,13 @@ namespace {
EVT MemVT, unsigned NumElem,
bool IsConstantSrc, bool UseVector);
+ /// This is a helper function for MergeConsecutiveStores.
+ /// Stores that may be merged are placed in StoreNodes.
+ /// Loads that may alias with those stores are placed in AliasLoadNodes.
+ void getStoreMergeAndAliasCandidates(
+ StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
+ SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes);
+
/// Merge consecutive store operations into a wide store.
/// This optimization uses wide integers or vectors when possible.
/// \return True if some memory operations were changed.
@@ -444,7 +459,7 @@ namespace {
return TLI.getSetCCResultType(*DAG.getContext(), VT);
}
};
-}
+} // namespace
namespace {
@@ -460,7 +475,7 @@ public:
DC.removeFromWorklist(N);
}
};
-}
+} // namespace
//===----------------------------------------------------------------------===//
// TargetLowering::DAGCombinerInfo implementation
@@ -1335,6 +1350,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::SRL: return visitSRL(N);
case ISD::ROTR:
case ISD::ROTL: return visitRotate(N);
+ case ISD::BSWAP: return visitBSWAP(N);
case ISD::CTLZ: return visitCTLZ(N);
case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N);
case ISD::CTTZ: return visitCTTZ(N);
@@ -1454,12 +1470,9 @@ SDValue DAGCombiner::combine(SDNode *N) {
if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
SDValue Ops[] = {N1, N0};
SDNode *CSENode;
- if (const BinaryWithFlagsSDNode *BinNode =
- dyn_cast<BinaryWithFlagsSDNode>(N)) {
+ if (const auto *BinNode = dyn_cast<BinaryWithFlagsSDNode>(N)) {
CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops,
- BinNode->Flags.hasNoUnsignedWrap(),
- BinNode->Flags.hasNoSignedWrap(),
- BinNode->Flags.hasExact());
+ &BinNode->Flags);
} else {
CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops);
}
@@ -4764,6 +4777,19 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitBSWAP(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+
+ // fold (bswap c1) -> c2
+ if (isConstantIntBuildVectorOrConstantInt(N0))
+ return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N0);
+ // fold (bswap (bswap x)) -> x
+ if (N0.getOpcode() == ISD::BSWAP)
+ return N0->getOperand(0);
+ return SDValue();
+}
+
SDValue DAGCombiner::visitCTLZ(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -5141,7 +5167,7 @@ SDValue DAGCombiner::visitMSCATTER(SDNode *N) {
std::tie(IndexLo, IndexHi) = DAG.SplitVector(MSC->getIndex(), DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
- getMachineMemOperand(MSC->getPointerInfo(),
+ getMachineMemOperand(MSC->getPointerInfo(),
MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
Alignment, MSC->getAAInfo(), MSC->getRanges());
@@ -5280,7 +5306,7 @@ SDValue DAGCombiner::visitMGATHER(SDNode *N) {
std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
- getMachineMemOperand(MGT->getPointerInfo(),
+ getMachineMemOperand(MGT->getPointerInfo(),
MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
Alignment, MGT->getAAInfo(), MGT->getRanges());
@@ -8078,7 +8104,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
auto *BV00 = dyn_cast<BuildVectorSDNode>(N00);
auto *BV01 = dyn_cast<BuildVectorSDNode>(N01);
-
+
// Check 1: Make sure that the first operand of the inner multiply is NOT
// a constant. Otherwise, we may induce infinite looping.
if (!(isConstOrConstSplatFP(N00) || (BV00 && BV00->isConstant()))) {
@@ -9928,7 +9954,7 @@ struct LoadedSlice {
return true;
}
};
-}
+} // namespace
/// \brief Check that all bits set in \p UsedBits form a dense region, i.e.,
/// \p UsedBits looks like 0..0 1..1 0..0.
@@ -10576,6 +10602,18 @@ struct BaseIndexOffset {
};
} // namespace
+SDValue DAGCombiner::getMergedConstantVectorStore(SelectionDAG &DAG,
+ SDLoc SL,
+ ArrayRef<MemOpLink> Stores,
+ EVT Ty) const {
+ SmallVector<SDValue, 8> BuildVector;
+
+ for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I)
+ BuildVector.push_back(cast<StoreSDNode>(Stores[I].MemNode)->getValue());
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SL, Ty, BuildVector);
+}
+
bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT,
unsigned NumElem, bool IsConstantSrc, bool UseVector) {
@@ -10606,12 +10644,7 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
assert(TLI.isTypeLegal(Ty) && "Illegal vector store");
if (IsConstantSrc) {
- // A vector store with a constant source implies that the constant is
- // zero; we only handle merging stores of constant zeros because the zero
- // can be materialized without a load.
- // It may be beneficial to loosen this restriction to allow non-zero
- // store merging.
- StoredVal = DAG.getConstant(0, DL, Ty);
+ StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Ty);
} else {
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElem ; ++i) {
@@ -10631,8 +10664,8 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
// elements, so this path implies a store of constants.
assert(IsConstantSrc && "Merged vector elements should use vector store");
- unsigned StoreBW = NumElem * ElementSizeBytes * 8;
- APInt StoreInt(StoreBW, 0);
+ unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
+ APInt StoreInt(SizeInBits, 0);
// Construct a single integer constant which is made of the smaller
// constant inputs.
@@ -10641,18 +10674,18 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
unsigned Idx = IsLE ? (NumElem - 1 - i) : i;
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
SDValue Val = St->getValue();
- StoreInt <<= ElementSizeBytes*8;
+ StoreInt <<= ElementSizeBytes * 8;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
- StoreInt |= C->getAPIntValue().zext(StoreBW);
+ StoreInt |= C->getAPIntValue().zext(SizeInBits);
} else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
- StoreInt |= C->getValueAPF().bitcastToAPInt().zext(StoreBW);
+ StoreInt |= C->getValueAPF().bitcastToAPInt().zext(SizeInBits);
} else {
llvm_unreachable("Invalid constant element type");
}
}
// Create the new Load and Store operations.
- EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
}
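The renamed SizeInBits loop packs the earliest-processed constant into the most significant bits. A worked little-endian instance, with two consecutive i16 stores and illustrative values:

  //   *(base + 0) = 0x1111;  *(base + 2) = 0x2222;  NumElem = 2, 32 bits
  //   i = 0: Idx = NumElem - 1 - 0 = 1  ->  StoreInt = 0x2222
  //   i = 1: Idx = 0  ->  StoreInt = (0x2222 << 16) | 0x1111 = 0x22221111
  // A little-endian i32 store of 0x22221111 writes bytes 11 11 22 22,
  // reproducing the two original stores exactly.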
@@ -10698,62 +10731,25 @@ static bool allowableAlignment(const SelectionDAG &DAG,
return (Align >= ABIAlignment);
}
-bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
- if (OptLevel == CodeGenOpt::None)
- return false;
-
- EVT MemVT = St->getMemoryVT();
- int64_t ElementSizeBytes = MemVT.getSizeInBits()/8;
- bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute(
- Attribute::NoImplicitFloat);
-
- // This function cannot currently deal with non-byte-sized memory sizes.
- if (ElementSizeBytes * 8 != MemVT.getSizeInBits())
- return false;
-
- // Don't merge vectors into wider inputs.
- if (MemVT.isVector() || !MemVT.isSimple())
- return false;
-
- // Perform an early exit check. Do not bother looking at stored values that
- // are not constants, loads, or extracted vector elements.
- SDValue StoredVal = St->getValue();
- bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
- bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
- isa<ConstantFPSDNode>(StoredVal);
- bool IsExtractVecEltSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT);
-
- if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecEltSrc)
- return false;
-
- // Only look at ends of store sequences.
- SDValue Chain = SDValue(St, 0);
- if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
- return false;
-
+void DAGCombiner::getStoreMergeAndAliasCandidates(
+ StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
+ SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes) {
// This holds the base pointer, index, and the offset in bytes from the base
// pointer.
BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr());
// We must have a base and an offset.
if (!BasePtr.Base.getNode())
- return false;
+ return;
// Do not handle stores to undef base pointers.
if (BasePtr.Base.getOpcode() == ISD::UNDEF)
- return false;
-
- // Save the LoadSDNodes that we find in the chain.
- // We need to make sure that these nodes do not interfere with
- // any of the store nodes.
- SmallVector<LSBaseSDNode*, 8> AliasLoadNodes;
-
- // Save the StoreSDNodes that we find in the chain.
- SmallVector<MemOpLink, 8> StoreNodes;
+ return;
// Walk up the chain and look for nodes with offsets from the same
// base pointer. Stop when reaching an instruction of a different kind or
// one that has a different base pointer.
+ EVT MemVT = St->getMemoryVT();
unsigned Seq = 0;
StoreSDNode *Index = St;
while (Index) {
@@ -10810,7 +10806,51 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
}
}
}
+}
+
+bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
+ if (OptLevel == CodeGenOpt::None)
+ return false;
+
+ EVT MemVT = St->getMemoryVT();
+ int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
+ bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ Attribute::NoImplicitFloat);
+
+ // This function cannot currently deal with non-byte-sized memory sizes.
+ if (ElementSizeBytes * 8 != MemVT.getSizeInBits())
+ return false;
+
+ // Don't merge vectors into wider inputs.
+ if (MemVT.isVector() || !MemVT.isSimple())
+ return false;
+
+ // Perform an early exit check. Do not bother looking at stored values that
+ // are not constants, loads, or extracted vector elements.
+ SDValue StoredVal = St->getValue();
+ bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
+ bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
+ isa<ConstantFPSDNode>(StoredVal);
+ bool IsExtractVecEltSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT);
+
+ if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecEltSrc)
+ return false;
+
+ // Only look at ends of store sequences.
+ SDValue Chain = SDValue(St, 0);
+ if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
+ return false;
+
+ // Save the LoadSDNodes that we find in the chain.
+ // We need to make sure that these nodes do not interfere with
+ // any of the store nodes.
+ SmallVector<LSBaseSDNode*, 8> AliasLoadNodes;
+
+ // Save the StoreSDNodes that we find in the chain.
+ SmallVector<MemOpLink, 8> StoreNodes;
+ getStoreMergeAndAliasCandidates(St, StoreNodes, AliasLoadNodes);
+
// Check if there is anything to merge.
if (StoreNodes.size() < 2)
return false;
@@ -10876,8 +10916,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
}
// Find a legal type for the constant store.
- unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
- EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ unsigned SizeInBits = (i+1) * ElementSizeBytes * 8;
+ EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
if (TLI.isTypeLegal(StoreTy) &&
allowableAlignment(DAG, TLI, StoreTy, FirstStoreAS,
FirstStoreAlign)) {
@@ -11039,8 +11079,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
}
// Find a legal type for the integer store.
- unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
- StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ unsigned SizeInBits = (i+1) * ElementSizeBytes * 8;
+ StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
if (TLI.isTypeLegal(StoreTy) &&
allowableAlignment(DAG, TLI, StoreTy, FirstStoreAS, FirstStoreAlign) &&
allowableAlignment(DAG, TLI, StoreTy, FirstLoadAS, FirstLoadAlign))
@@ -11094,8 +11134,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
if (UseVectorTy) {
JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
} else {
- unsigned StoreBW = NumElem * ElementSizeBytes * 8;
- JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
+ JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
}
SDLoc LoadDL(LoadNodes[0].MemNode);
@@ -12093,7 +12133,7 @@ static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {
}
// If any of the operands is a floating point scalar bitcast to a vector,
-  // use floating point types throughout, and bitcast everything.
+  // use floating point types throughout, and bitcast everything.
// Replace UNDEFs by another scalar UNDEF node, of the final desired type.
if (AnyFP) {
SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
@@ -12924,7 +12964,7 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
SDValue RHS = N->getOperand(1);
SDLoc dl(N);
- // Make sure we're not running after operation legalization where it
+ // Make sure we're not running after operation legalization where it
// may have custom lowered the vector shuffles.
if (LegalOperations)
return SDValue();
@@ -13845,12 +13885,10 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) +
Op1->getSrcValueOffset() - MinOffset;
AliasAnalysis::AliasResult AAResult =
- AA.alias(AliasAnalysis::Location(Op0->getMemOperand()->getValue(),
- Overlap1,
- UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
- AliasAnalysis::Location(Op1->getMemOperand()->getValue(),
- Overlap2,
- UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
+ AA.alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap1,
+ UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
+ MemoryLocation(Op1->getMemOperand()->getValue(), Overlap2,
+ UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
if (AAResult == AliasAnalysis::NoAlias)
return false;
}
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index f3d75cb32a7d..ecaa2c972719 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -259,20 +259,27 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
// If this is an MSVC EH personality, we need to do a bit more work.
EHPersonality Personality = EHPersonality::Unknown;
- if (!LPads.empty())
- Personality = classifyEHPersonality(LPads.back()->getPersonalityFn());
+ if (Fn->hasPersonalityFn())
+ Personality = classifyEHPersonality(Fn->getPersonalityFn());
if (!isMSVCEHPersonality(Personality))
return;
- if (Personality == EHPersonality::MSVC_Win64SEH) {
+ if (Personality == EHPersonality::MSVC_Win64SEH ||
+ Personality == EHPersonality::MSVC_X86SEH) {
addSEHHandlersForLPads(LPads);
- } else if (Personality == EHPersonality::MSVC_CXX) {
+ }
+
+ WinEHFuncInfo &EHInfo = MMI.getWinEHFuncInfo(&fn);
+ if (Personality == EHPersonality::MSVC_CXX) {
const Function *WinEHParentFn = MMI.getWinEHParent(&fn);
- WinEHFuncInfo &EHInfo = MMI.getWinEHFuncInfo(WinEHParentFn);
calculateWinCXXEHStateNumbers(WinEHParentFn, EHInfo);
+ }
- // Copy the state numbers to LandingPadInfo for the current function, which
- // could be a handler or the parent.
+ // Copy the state numbers to LandingPadInfo for the current function, which
+ // could be a handler or the parent. This should happen for 32-bit SEH and
+ // C++ EH.
+ if (Personality == EHPersonality::MSVC_CXX ||
+ Personality == EHPersonality::MSVC_X86SEH) {
for (const LandingPadInst *LP : LPads) {
MachineBasicBlock *LPadMBB = MBBMap[LP->getParent()];
MMI.addWinEHState(LPadMBB, EHInfo.LandingPadStateMap[LP]);
@@ -539,8 +546,10 @@ void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
/// landingpad instruction and add them to the specified machine module info.
void llvm::AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
MachineBasicBlock *MBB) {
- MMI.addPersonality(MBB,
- cast<Function>(I.getPersonalityFn()->stripPointerCasts()));
+ MMI.addPersonality(
+ MBB,
+ cast<Function>(
+ I.getParent()->getParent()->getPersonalityFn()->stripPointerCasts()));
if (I.isCleanup())
MMI.addCleanup(MBB);
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.h b/lib/CodeGen/SelectionDAG/InstrEmitter.h
index 7b86f7dd8de0..2a61914eecd3 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.h
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.h
@@ -140,6 +140,6 @@ private:
DenseMap<SDValue, unsigned> &VRBaseMap);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 7d98872f8af1..37f95e5a22b9 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -198,7 +198,7 @@ public:
ReplacedNode(Old);
}
};
-}
+} // namespace
/// Return a vector shuffle operation which
/// performs the same shuffle in terms of order of result bytes, but on a type
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index 9c297698c1db..c3e3b7c525b9 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -676,7 +676,7 @@ namespace {
NodesToAnalyze.insert(N);
}
};
-}
+} // namespace
/// ReplaceValueWith - The specified value was legalized to the specified other
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index c06227bd9701..50ad2391d15b 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1010,7 +1010,7 @@ SDValue VectorLegalizer::UnrollVSETCC(SDValue Op) {
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
}
-}
+} // namespace
bool SelectionDAG::LegalizeVectors() {
return VectorLegalizer(*this).Run();
diff --git a/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h b/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
index c27f8de601f2..949353256938 100644
--- a/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
+++ b/lib/CodeGen/SelectionDAG/SDNodeDbgValue.h
@@ -119,6 +119,6 @@ public:
bool isInvalidated() const { return Invalid; }
};
-} // end llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
index 6351fa2c4a2f..4c74182014a0 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
@@ -180,6 +180,6 @@ namespace llvm {
void EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap,
MachineBasicBlock::iterator InsertPos);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index cf51e756d847..0eff930ceddd 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -400,19 +400,24 @@ static void AddNodeIDOperands(FoldingSetNodeID &ID,
ID.AddInteger(Op.getResNo());
}
}
+/// Add logical or fast-math flag values to the FoldingSetNodeID.
+static void AddNodeIDFlags(FoldingSetNodeID &ID, unsigned Opcode,
+ const SDNodeFlags *Flags) {
+ if (!Flags || !isBinOpWithFlags(Opcode))
+ return;
-static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, bool nuw, bool nsw,
- bool exact) {
- ID.AddBoolean(nuw);
- ID.AddBoolean(nsw);
- ID.AddBoolean(exact);
+ unsigned RawFlags = Flags->getRawFlags();
+ // If no flags are set, do not alter the ID. We must match the ID of nodes
+ // that were created without explicitly specifying flags. This also saves time
+ // and allows a gradual increase in API usage of the optional optimization
+ // flags.
+ if (RawFlags != 0)
+ ID.AddInteger(RawFlags);
}
-/// AddBinaryNodeIDCustom - Add BinarySDNodes special infos
-static void AddBinaryNodeIDCustom(FoldingSetNodeID &ID, unsigned Opcode,
- bool nuw, bool nsw, bool exact) {
- if (isBinOpWithFlags(Opcode))
- AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
+static void AddNodeIDFlags(FoldingSetNodeID &ID, const SDNode *N) {
+ if (auto *Node = dyn_cast<BinaryWithFlagsSDNode>(N))
+ AddNodeIDFlags(ID, Node->getOpcode(), &Node->Flags);
}
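
Editorial aside: the key property of AddNodeIDFlags is that a node created with an all-zero flags object must hash identically to one created with no flags at all, or CSE would stop matching them. A self-contained illustration of that invariant (ToyNodeID is a hypothetical stand-in, not the real FoldingSetNodeID):

#include <cassert>
#include <vector>

// Toy stand-in for FoldingSetNodeID: identity is the accumulated words.
struct ToyNodeID {
  std::vector<unsigned> Words;
  void AddInteger(unsigned V) { Words.push_back(V); }
  bool operator==(const ToyNodeID &O) const { return Words == O.Words; }
};

// Mirrors the early return above: zero raw flags leave the ID untouched,
// so a node built without flags and one built with default flags match.
static void addFlags(ToyNodeID &ID, unsigned RawFlags) {
  if (RawFlags != 0)
    ID.AddInteger(RawFlags);
}

int main() {
  ToyNodeID NoFlags, DefaultFlags, Exact;
  addFlags(DefaultFlags, 0); // default-constructed flags: raw value 0
  addFlags(Exact, 1u << 0);  // some bit set, e.g. 'exact'
  assert(NoFlags == DefaultFlags); // CSEs with flag-less nodes
  assert(!(NoFlags == Exact));     // flagged node gets a distinct ID
  return 0;
}
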
static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
@@ -507,20 +512,6 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
ID.AddInteger(ST->getPointerInfo().getAddrSpace());
break;
}
- case ISD::SDIV:
- case ISD::UDIV:
- case ISD::SRA:
- case ISD::SRL:
- case ISD::MUL:
- case ISD::ADD:
- case ISD::SUB:
- case ISD::SHL: {
- const BinaryWithFlagsSDNode *BinNode = cast<BinaryWithFlagsSDNode>(N);
- AddBinaryNodeIDCustom(
- ID, N->getOpcode(), BinNode->Flags.hasNoUnsignedWrap(),
- BinNode->Flags.hasNoSignedWrap(), BinNode->Flags.hasExact());
- break;
- }
case ISD::ATOMIC_CMP_SWAP:
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
case ISD::ATOMIC_SWAP:
@@ -564,6 +555,8 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
}
} // end switch (N->getOpcode())
+ AddNodeIDFlags(ID, N);
+
// Target specific memory nodes could also have address spaces to check.
if (N->isTargetMemoryOpcode())
ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
@@ -960,14 +953,16 @@ void SelectionDAG::allnodes_clear() {
BinarySDNode *SelectionDAG::GetBinarySDNode(unsigned Opcode, SDLoc DL,
SDVTList VTs, SDValue N1,
- SDValue N2, bool nuw, bool nsw,
- bool exact) {
+ SDValue N2,
+ const SDNodeFlags *Flags) {
if (isBinOpWithFlags(Opcode)) {
+ // If no flags were passed in, use a default flags object.
+ SDNodeFlags F;
+ if (Flags == nullptr)
+ Flags = &F;
+
BinaryWithFlagsSDNode *FN = new (NodeAllocator) BinaryWithFlagsSDNode(
- Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
- FN->Flags.setNoUnsignedWrap(nuw);
- FN->Flags.setNoSignedWrap(nsw);
- FN->Flags.setExact(exact);
+ Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2, *Flags);
return FN;
}
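
Editorial aside: the nullptr-to-default substitution above is a small but common idiom: accept an optional pointer, fall back to a stack-allocated default so the rest of the code can dereference unconditionally. A generic sketch under that assumption (hypothetical Flags type, not the real SDNodeFlags):

#include <cassert>

struct Flags {
  bool Exact = false;
  unsigned raw() const { return Exact ? 1u : 0u; }
};

// Callers may pass nullptr; the callee substitutes a default object so
// every later use can dereference F without a null check.
static unsigned buildNode(const Flags *F) {
  Flags Default;
  if (F == nullptr)
    F = &Default;
  return F->raw();
}

int main() {
  Flags E;
  E.Exact = true;
  assert(buildNode(nullptr) == 0); // default flags
  assert(buildNode(&E) == 1);      // explicit flags
  return 0;
}
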
@@ -2932,6 +2927,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
case ISD::TRUNCATE:
case ISD::UINT_TO_FP:
case ISD::SINT_TO_FP:
+ case ISD::BSWAP:
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTTZ:
@@ -3081,6 +3077,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
if (OpOpcode == ISD::UNDEF)
return getUNDEF(VT);
break;
+ case ISD::BSWAP:
+ assert(VT.isInteger() && VT == Operand.getValueType() &&
+ "Invalid BSWAP!");
+ assert((VT.getScalarSizeInBits() % 16 == 0) &&
+ "BSWAP types must be a multiple of 16 bits!");
+ if (OpOpcode == ISD::UNDEF)
+ return getUNDEF(VT);
+ break;
case ISD::BITCAST:
// Basic sanity checking.
assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
@@ -3260,7 +3264,7 @@ SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
}
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
- SDValue N2, bool nuw, bool nsw, bool exact) {
+ SDValue N2, const SDNodeFlags *Flags) {
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
switch (Opcode) {
@@ -3747,22 +3751,20 @@ SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
// Memoize this node if possible.
BinarySDNode *N;
SDVTList VTs = getVTList(VT);
- const bool BinOpHasFlags = isBinOpWithFlags(Opcode);
if (VT != MVT::Glue) {
SDValue Ops[] = {N1, N2};
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTs, Ops);
- if (BinOpHasFlags)
- AddBinaryNodeIDCustom(ID, Opcode, nuw, nsw, exact);
+ AddNodeIDFlags(ID, Opcode, Flags);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DL.getDebugLoc(), IP))
return SDValue(E, 0);
- N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
+ N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
CSEMap.InsertNode(N, IP);
} else {
- N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, nuw, nsw, exact);
+ N = GetBinarySDNode(Opcode, DL, VTs, N1, N2, Flags);
}
InsertNode(N);
@@ -4023,10 +4025,10 @@ static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
}
-/// FindOptimalMemOpLowering - Determines the optimial series memory ops
-/// to replace the memset / memcpy. Return true if the number of memory ops
-/// is below the threshold. It returns the types of the sequence of
-/// memory ops to perform memset / memcpy by reference.
+/// Determines the optimal series of memory ops to replace the memset / memcpy.
+/// Return true if the number of memory ops is below the threshold (Limit).
+/// It returns the types of the sequence of memory ops to perform
+/// memset / memcpy by reference.
static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
unsigned Limit, uint64_t Size,
unsigned DstAlign, unsigned SrcAlign,
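
Editorial aside: the comment above states the contract, not the algorithm. As a rough illustration of the kind of decomposition involved, here is a naive greedy split of a copy size into power-of-two chunks under an op-count limit; this is only a sketch and not LLVM's actual cost model or type selection:

#include <cassert>
#include <cstdint>
#include <vector>

// Greedily cover Size bytes with the largest power-of-two chunks no wider
// than MaxWidth. Returns false, like FindOptimalMemOpLowering, when more
// than Limit operations would be needed.
static bool splitMemOps(std::vector<unsigned> &Chunks, uint64_t Size,
                        unsigned MaxWidth, unsigned Limit) {
  while (Size != 0) {
    unsigned W = MaxWidth;
    while (W > Size) // shrink to the largest chunk that still fits
      W /= 2;
    Chunks.push_back(W);
    Size -= W;
    if (Chunks.size() > Limit)
      return false;
  }
  return true;
}

int main() {
  std::vector<unsigned> Chunks;
  assert(splitMemOps(Chunks, 15, 8, 8)); // 8 + 4 + 2 + 1
  assert(Chunks.size() == 4);
  Chunks.clear();
  assert(!splitMemOps(Chunks, 1024, 4, 8)); // too many 4-byte ops
  return 0;
}
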
@@ -6066,13 +6068,12 @@ SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
- ArrayRef<SDValue> Ops, bool nuw, bool nsw,
- bool exact) {
+ ArrayRef<SDValue> Ops,
+ const SDNodeFlags *Flags) {
if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops);
- if (isBinOpWithFlags(Opcode))
- AddBinaryNodeIDCustom(ID, nuw, nsw, exact);
+ AddNodeIDFlags(ID, Opcode, Flags);
void *IP = nullptr;
if (SDNode *E = FindNodeOrInsertPos(ID, DebugLoc(), IP))
return E;
@@ -6133,7 +6134,7 @@ public:
: SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};
-}
+} // namespace
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
@@ -6343,7 +6344,7 @@ namespace {
bool operator<(const UseMemo &L, const UseMemo &R) {
return (intptr_t)L.User < (intptr_t)R.User;
}
-}
+} // namespace
/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The same value
@@ -6588,7 +6589,7 @@ namespace {
VTs.push_back(MVT((MVT::SimpleValueType)i));
}
};
-}
+} // namespace
static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 8ba957d62870..8313a48c3467 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -78,12 +78,16 @@ LimitFPPrecision("limit-float-precision",
cl::location(LimitFloatPrecision),
cl::init(0));
+static cl::opt<bool>
+EnableFMFInDAG("enable-fmf-dag", cl::init(false), cl::Hidden,
+ cl::desc("Enable fast-math-flags for DAG nodes"));
+
// Limit the width of DAG chains. This is important in general to prevent
-// prevent DAG-based analysis from blowing up. For example, alias analysis and
+// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
-// the safe approach, and will be especially important with global DAGs.
+// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
@@ -2148,6 +2152,8 @@ void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
bool nuw = false;
bool nsw = false;
bool exact = false;
+ FastMathFlags FMF;
+
if (const OverflowingBinaryOperator *OFBinOp =
dyn_cast<const OverflowingBinaryOperator>(&I)) {
nuw = OFBinOp->hasNoUnsignedWrap();
@@ -2156,9 +2162,22 @@ void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
if (const PossiblyExactOperator *ExactOp =
dyn_cast<const PossiblyExactOperator>(&I))
exact = ExactOp->isExact();
-
+ if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&I))
+ FMF = FPOp->getFastMathFlags();
+
+ SDNodeFlags Flags;
+ Flags.setExact(exact);
+ Flags.setNoSignedWrap(nsw);
+ Flags.setNoUnsignedWrap(nuw);
+ if (EnableFMFInDAG) {
+ Flags.setAllowReciprocal(FMF.allowReciprocal());
+ Flags.setNoInfs(FMF.noInfs());
+ Flags.setNoNaNs(FMF.noNaNs());
+ Flags.setNoSignedZeros(FMF.noSignedZeros());
+ Flags.setUnsafeAlgebra(FMF.unsafeAlgebra());
+ }
SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
- Op1, Op2, nuw, nsw, exact);
+ Op1, Op2, &Flags);
setValue(&I, BinNodeValue);
}
@@ -2206,9 +2225,12 @@ void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
dyn_cast<const PossiblyExactOperator>(&I))
exact = ExactOp->isExact();
}
-
+ SDNodeFlags Flags;
+ Flags.setExact(exact);
+ Flags.setNoSignedWrap(nsw);
+ Flags.setNoUnsignedWrap(nuw);
SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
- nuw, nsw, exact);
+ &Flags);
setValue(&I, Res);
}
@@ -2892,7 +2914,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
// Serialize volatile loads with other side effects.
Root = getRoot();
else if (AA->pointsToConstantMemory(
- AliasAnalysis::Location(SV, AA->getTypeStoreSize(Ty), AAInfo))) {
+ MemoryLocation(SV, AA->getTypeStoreSize(Ty), AAInfo))) {
// Do not serialize (non-volatile) loads of constant memory with anything.
Root = DAG.getEntryNode();
ConstantMemory = true;
@@ -2907,8 +2929,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
SmallVector<SDValue, 4> Values(NumValues);
- SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
- NumValues));
+ SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
EVT PtrVT = Ptr.getValueType();
unsigned ChainI = 0;
for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
@@ -2972,8 +2993,7 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
SDValue Ptr = getValue(PtrV);
SDValue Root = getRoot();
- SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
- NumValues));
+ SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
EVT PtrVT = Ptr.getValueType();
bool isVolatile = I.isVolatile();
bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
@@ -3141,10 +3161,8 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I) {
const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
SDValue InChain = DAG.getRoot();
- if (AA->pointsToConstantMemory(
- AliasAnalysis::Location(PtrOperand,
- AA->getTypeStoreSize(I.getType()),
- AAInfo))) {
+ if (AA->pointsToConstantMemory(MemoryLocation(
+ PtrOperand, AA->getTypeStoreSize(I.getType()), AAInfo))) {
// Do not serialize (non-volatile) loads of constant memory with anything.
InChain = DAG.getEntryNode();
}
@@ -3186,10 +3204,9 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
Value *BasePtr = Ptr;
bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
bool ConstantMemory = false;
- if (UniformBase && AA->pointsToConstantMemory(
- AliasAnalysis::Location(BasePtr,
- AA->getTypeStoreSize(I.getType()),
- AAInfo))) {
+ if (UniformBase &&
+ AA->pointsToConstantMemory(
+ MemoryLocation(BasePtr, AA->getTypeStoreSize(I.getType()), AAInfo))) {
// Do not serialize (non-volatile) loads of constant memory with anything.
Root = DAG.getEntryNode();
ConstantMemory = true;
@@ -4983,6 +5000,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
assert(Reg && "cannot get exception code on this platform");
MVT PtrVT = TLI.getPointerTy();
const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
+ assert(FuncInfo.MBB->isLandingPad() && "eh.exceptioncode in non-lpad");
unsigned VReg = FuncInfo.MBB->addLiveIn(Reg, PtrRC);
SDValue N =
DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
@@ -7486,6 +7504,31 @@ void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
const int64_t N = Clusters.size();
const unsigned MinJumpTableSize = TLI.getMinimumJumpTableEntries();
+ // TotalCases[i]: Total nbr of cases in Clusters[0..i].
+ SmallVector<unsigned, 8> TotalCases(N);
+
+ for (unsigned i = 0; i < N; ++i) {
+ APInt Hi = Clusters[i].High->getValue();
+ APInt Lo = Clusters[i].Low->getValue();
+ TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
+ if (i != 0)
+ TotalCases[i] += TotalCases[i - 1];
+ }
+
+ if (N >= MinJumpTableSize && isDense(Clusters, &TotalCases[0], 0, N - 1)) {
+    // Cheap case: the whole range might be suitable for a jump table.
+ CaseCluster JTCluster;
+ if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
+ Clusters[0] = JTCluster;
+ Clusters.resize(1);
+ return;
+ }
+ }
+
+ // The algorithm below is not suitable for -O0.
+ if (TM.getOptLevel() == CodeGenOpt::None)
+ return;
+
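
Editorial aside: the TotalCases prefix sums make the number of case values covered by any cluster range an O(1) query, which is what a density test needs. A standalone sketch of that computation; the 40% threshold here is only an assumed stand-in for whatever isDense actually uses:

#include <cassert>
#include <cstdint>
#include <vector>

struct Cluster { int64_t Low, High; }; // inclusive case-value range

// Density of Clusters[First..Last]: covered case values over the span of
// the whole range, answered in O(1) via the prefix sums.
static bool isDenseRange(const std::vector<Cluster> &Clusters,
                         const std::vector<uint64_t> &TotalCases,
                         unsigned First, unsigned Last) {
  uint64_t NumCases =
      TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
  uint64_t Range = Clusters[Last].High - Clusters[First].Low + 1;
  return NumCases * 100 >= Range * 40; // assumed 40% density threshold
}

int main() {
  std::vector<Cluster> C = {{0, 3}, {5, 5}, {100, 100}};
  std::vector<uint64_t> Total;
  uint64_t Sum = 0;
  for (const Cluster &X : C) {  // same prefix-sum construction as above
    Sum += X.High - X.Low + 1;
    Total.push_back(Sum);
  }
  assert(isDenseRange(C, Total, 0, 1));  // 5 cases over a range of 6
  assert(!isDenseRange(C, Total, 0, 2)); // 6 cases over a range of 101
  return 0;
}
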
// Split Clusters into minimum number of dense partitions. The algorithm uses
// the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
// for the Case Statement'" (1994), but builds the MinPartitions array in
@@ -7499,16 +7542,6 @@ void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
SmallVector<unsigned, 8> LastElement(N);
// NumTables[i]: nbr of >= MinJumpTableSize partitions from Clusters[i..N-1].
SmallVector<unsigned, 8> NumTables(N);
- // TotalCases[i]: Total nbr of cases in Clusters[0..i].
- SmallVector<unsigned, 8> TotalCases(N);
-
- for (unsigned i = 0; i < N; ++i) {
- APInt Hi = Clusters[i].High->getValue();
- APInt Lo = Clusters[i].Low->getValue();
- TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
- if (i != 0)
- TotalCases[i] += TotalCases[i - 1];
- }
// Base case: There is only one way to partition Clusters[N-1].
MinPartitions[N - 1] = 1;
@@ -7696,6 +7729,10 @@ void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif
+ // The algorithm below is not suitable for -O0.
+ if (TM.getOptLevel() == CodeGenOpt::None)
+ return;
+
// If target does not have legal shift left, do not emit bit tests at all.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT PTy = TLI.getPointerTy();
@@ -7959,6 +7996,18 @@ void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
}
}
+unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
+ CaseClusterIt First,
+ CaseClusterIt Last) {
+ return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
+ if (X.Weight != CC.Weight)
+ return X.Weight > CC.Weight;
+
+ // Ties are broken by comparing the case value.
+ return X.Low->getValue().slt(CC.Low->getValue());
+ });
+}
+
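
Editorial aside: note the comparator trick in caseClusterRank: std::count_if counts the clusters that would sort strictly ahead of CC, so the heaviest cluster in the range gets rank 0, matching the header comment. A quick standalone check of that behaviour with toy types and the same lambda shape:

#include <algorithm>
#include <cassert>
#include <vector>

struct ToyCluster { unsigned Weight; int Low; };

// Rank of CC within [First, Last]: the number of clusters that beat it,
// by weight first and by case value as the tie-break.
static unsigned rank(const ToyCluster &CC,
                     std::vector<ToyCluster>::const_iterator First,
                     std::vector<ToyCluster>::const_iterator Last) {
  return std::count_if(First, Last + 1, [&](const ToyCluster &X) {
    if (X.Weight != CC.Weight)
      return X.Weight > CC.Weight;
    return X.Low < CC.Low; // ties broken by comparing the case value
  });
}

int main() {
  std::vector<ToyCluster> C = {{10, 0}, {30, 1}, {20, 2}};
  assert(rank(C[1], C.begin(), C.end() - 1) == 0); // heaviest: rank 0
  assert(rank(C[2], C.begin(), C.end() - 1) == 1); // one heavier cluster
  assert(rank(C[0], C.begin(), C.end() - 1) == 2); // beaten by both
  return 0;
}
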
void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
const SwitchWorkListItem &W,
Value *Cond,
@@ -7988,6 +8037,48 @@ void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
RightWeight += (--FirstRight)->Weight;
I++;
}
+
+ for (;;) {
+ // Our binary search tree differs from a typical BST in that ours can have up
+ // to three values in each leaf. The pivot selection above doesn't take that
+ // into account, which means the tree might require more nodes and be less
+ // efficient. We compensate for this here.
+
+ unsigned NumLeft = LastLeft - W.FirstCluster + 1;
+ unsigned NumRight = W.LastCluster - FirstRight + 1;
+
+ if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
+      // If one side has fewer than 3 clusters and the other has more than 3,
+ // consider taking a cluster from the other side.
+
+ if (NumLeft < NumRight) {
+ // Consider moving the first cluster on the right to the left side.
+ CaseCluster &CC = *FirstRight;
+ unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
+ unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
+ if (LeftSideRank <= RightSideRank) {
+ // Moving the cluster to the left does not demote it.
+ ++LastLeft;
+ ++FirstRight;
+ continue;
+ }
+ } else {
+ assert(NumRight < NumLeft);
+ // Consider moving the last element on the left to the right side.
+ CaseCluster &CC = *LastLeft;
+ unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
+ unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
+ if (RightSideRank <= LeftSideRank) {
+          // Moving the cluster to the right does not demote it.
+ --LastLeft;
+ --FirstRight;
+ continue;
+ }
+ }
+ }
+ break;
+ }
+
assert(LastLeft + 1 == FirstRight);
assert(LastLeft >= W.FirstCluster);
assert(FirstRight <= W.LastCluster);
@@ -8111,11 +8202,8 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
return;
}
- if (TM.getOptLevel() != CodeGenOpt::None) {
- findJumpTables(Clusters, &SI, DefaultMBB);
- findBitTestClusters(Clusters, &SI);
- }
-
+ findJumpTables(Clusters, &SI, DefaultMBB);
+ findBitTestClusters(Clusters, &SI);
DEBUG({
dbgs() << "Case clusters: ";
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index f0c03af3f64b..f225d54d189d 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -342,6 +342,11 @@ private:
};
typedef SmallVector<SwitchWorkListItem, 4> SwitchWorkList;
+ /// Determine the rank by weight of CC in [First,Last]. If CC has more weight
+ /// than each cluster in the range, its rank is 0.
+ static unsigned caseClusterRank(const CaseCluster &CC, CaseClusterIt First,
+ CaseClusterIt Last);
+
/// Emit comparison and split W into two subtrees.
void splitWorkItem(SwitchWorkList &WorkList, const SwitchWorkListItem &W,
Value *Cond, MachineBasicBlock *SwitchMBB);
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 22f592afae71..c5562cd31067 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -307,7 +307,7 @@ namespace llvm {
"Unknown sched type!");
return createILPListDAGScheduler(IS, OptLevel);
}
-}
+} // namespace llvm
// EmitInstrWithCustomInserter - This method should be implemented by targets
// that mark instructions with the 'usesCustomInserter' flag. These
@@ -938,8 +938,10 @@ bool SelectionDAGISel::PrepareEHLandingPad() {
// pad into several BBs.
const BasicBlock *LLVMBB = MBB->getBasicBlock();
const LandingPadInst *LPadInst = LLVMBB->getLandingPadInst();
- MF->getMMI().addPersonality(
- MBB, cast<Function>(LPadInst->getPersonalityFn()->stripPointerCasts()));
+ MF->getMMI().addPersonality(MBB, cast<Function>(LPadInst->getParent()
+ ->getParent()
+ ->getPersonalityFn()
+ ->stripPointerCasts()));
EHPersonality Personality = MF->getMMI().getPersonalityType();
if (isMSVCEHPersonality(Personality)) {
@@ -2540,7 +2542,7 @@ public:
J.setNode(E);
}
};
-}
+} // namespace
SDNode *SelectionDAGISel::
SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
index 4df5ede388fc..19b5d160c8a9 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp
@@ -132,7 +132,7 @@ namespace llvm {
"color=blue,style=dashed");
}
};
-}
+} // namespace llvm
std::string DOTGraphTraits<SelectionDAG*>::getNodeLabel(const SDNode *Node,
const SelectionDAG *G) {
diff --git a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
index 8bbfa01e7594..a6b3fc6c4d4a 100644
--- a/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -113,84 +113,137 @@ StatepointLoweringState::allocateStackSlot(EVT ValueType,
llvm_unreachable("infinite loop?");
}
+/// Utility function for reservePreviousStackSlotForValue. Tries to find the
+/// stack slot index to which we spilled the value for previous statepoints.
+/// LookUpDepth specifies the maximum DFS depth this function may search.
+static Optional<int> findPreviousSpillSlot(const Value *Val,
+ SelectionDAGBuilder &Builder,
+ int LookUpDepth) {
+  // Cannot look any further - give up now.
+ if (LookUpDepth <= 0)
+ return Optional<int>();
+
+ // Spill location is known for gc relocates
+ if (isGCRelocate(Val)) {
+ GCRelocateOperands RelocOps(cast<Instruction>(Val));
+
+ FunctionLoweringInfo::StatepointSpilledValueMapTy &SpillMap =
+ Builder.FuncInfo.StatepointRelocatedValues[RelocOps.getStatepoint()];
+
+ auto It = SpillMap.find(RelocOps.getDerivedPtr());
+ if (It == SpillMap.end())
+ return Optional<int>();
+
+ return It->second;
+ }
+
+ // Look through bitcast instructions.
+ if (const BitCastInst *Cast = dyn_cast<BitCastInst>(Val)) {
+ return findPreviousSpillSlot(Cast->getOperand(0), Builder, LookUpDepth - 1);
+ }
+
+  // Look through phi nodes: all incoming values must have the same known
+  // stack slot, otherwise the result is unknown.
+ if (const PHINode *Phi = dyn_cast<PHINode>(Val)) {
+ Optional<int> MergedResult = None;
+
+ for (auto &IncomingValue : Phi->incoming_values()) {
+ Optional<int> SpillSlot =
+ findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
+ if (!SpillSlot.hasValue())
+ return Optional<int>();
+
+ if (MergedResult.hasValue() && *MergedResult != *SpillSlot)
+ return Optional<int>();
+
+ MergedResult = SpillSlot;
+ }
+ return MergedResult;
+ }
+
+ // TODO: We can do better for PHI nodes. In cases like this:
+ // ptr = phi(relocated_pointer, not_relocated_pointer)
+ // statepoint(ptr)
+  // We will return that the stack slot for ptr is unknown, and later we
+  // might assign different stack slots for ptr and relocated_pointer. This
+  // limits llvm's ability to remove redundant stores.
+  // Unfortunately this is hard to accomplish in the current infrastructure.
+  // We use this function to eliminate the spill store completely; in the
+  // example above we would still need to emit the store, but at a special
+  // "preferred" location rather than at an arbitrary one.
+
+ // TODO: handle simple updates. If a value is modified and the original
+ // value is no longer live, it would be nice to put the modified value in the
+ // same slot. This allows folding of the memory accesses for some
+ // instructions types (like an increment).
+ // statepoint (i)
+ // i1 = i+1
+ // statepoint (i1)
+ // However we need to be careful for cases like this:
+ // statepoint(i)
+ // i1 = i+1
+ // statepoint(i, i1)
+ // Here we want to reserve spill slot for 'i', but not for 'i+1'. If we just
+  // put handling of simple modifications in this function, as is done
+  // for bitcasts, we might end up reserving i's slot for 'i+1' because the
+  // order in which we visit values is unspecified.
+
+  // We don't have any information about this instruction.
+ return Optional<int>();
+}
+
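
Editorial aside: the phi handling above implements a small meet operation, where the result is known only if every incoming value agrees. The same logic sketched independently with std::optional (a hypothetical C++17 helper, not the Optional<int> code above):

#include <cassert>
#include <optional>
#include <vector>

// Meet over phi inputs: all incoming slots must be known and equal,
// otherwise the merged slot is unknown (nullopt).
static std::optional<int>
mergeSlots(const std::vector<std::optional<int>> &Incoming) {
  std::optional<int> Merged;
  for (const auto &Slot : Incoming) {
    if (!Slot)
      return std::nullopt; // any unknown input poisons the merge
    if (Merged && *Merged != *Slot)
      return std::nullopt; // disagreement: the result is unknown
    Merged = Slot;
  }
  return Merged;
}

int main() {
  assert(mergeSlots({3, 3, 3}) == std::optional<int>(3));
  assert(!mergeSlots({3, 4}));            // conflicting slots
  assert(!mergeSlots({3, std::nullopt})); // unknown input
  return 0;
}
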
/// Try to find existing copies of the incoming values in stack slots used for
/// statepoint spilling. If we can find a spill slot for the incoming value,
/// mark that slot as allocated, and reuse the same slot for this safepoint.
/// This helps to avoid a series of loads and stores that only serve to reshuffle
/// values on the stack between calls.
-static void reservePreviousStackSlotForValue(SDValue Incoming,
+static void reservePreviousStackSlotForValue(const Value *IncomingValue,
SelectionDAGBuilder &Builder) {
+ SDValue Incoming = Builder.getValue(IncomingValue);
+
if (isa<ConstantSDNode>(Incoming) || isa<FrameIndexSDNode>(Incoming)) {
// We won't need to spill this, so no need to check for previously
// allocated stack slots
return;
}
- SDValue Loc = Builder.StatepointLowering.getLocation(Incoming);
- if (Loc.getNode()) {
+ SDValue OldLocation = Builder.StatepointLowering.getLocation(Incoming);
+ if (OldLocation.getNode())
// duplicates in input
return;
- }
-
- // Search back for the load from a stack slot pattern to find the original
- // slot we allocated for this value. We could extend this to deal with
- // simple modification patterns, but simple dealing with trivial load/store
- // sequences helps a lot already.
- if (LoadSDNode *Load = dyn_cast<LoadSDNode>(Incoming)) {
- if (auto *FI = dyn_cast<FrameIndexSDNode>(Load->getBasePtr())) {
- const int Index = FI->getIndex();
- auto Itr = std::find(Builder.FuncInfo.StatepointStackSlots.begin(),
- Builder.FuncInfo.StatepointStackSlots.end(), Index);
- if (Itr == Builder.FuncInfo.StatepointStackSlots.end()) {
- // not one of the lowering stack slots, can't reuse!
- // TODO: Actually, we probably could reuse the stack slot if the value
- // hasn't changed at all, but we'd need to look for intervening writes
- return;
- } else {
- // This is one of our dedicated lowering slots
- const int Offset =
- std::distance(Builder.FuncInfo.StatepointStackSlots.begin(), Itr);
- if (Builder.StatepointLowering.isStackSlotAllocated(Offset)) {
- // stack slot already assigned to someone else, can't use it!
- // TODO: currently we reserve space for gc arguments after doing
- // normal allocation for deopt arguments. We should reserve for
- // _all_ deopt and gc arguments, then start allocating. This
- // will prevent some moves being inserted when vm state changes,
- // but gc state doesn't between two calls.
- return;
- }
- // Reserve this stack slot
- Builder.StatepointLowering.reserveStackSlot(Offset);
- }
- // Cache this slot so we find it when going through the normal
- // assignment loop.
- SDValue Loc =
- Builder.DAG.getTargetFrameIndex(Index, Incoming.getValueType());
+ const int LookUpDepth = 6;
+ Optional<int> Index =
+ findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth);
+ if (!Index.hasValue())
+ return;
- Builder.StatepointLowering.setLocation(Incoming, Loc);
- }
+ auto Itr = std::find(Builder.FuncInfo.StatepointStackSlots.begin(),
+ Builder.FuncInfo.StatepointStackSlots.end(), *Index);
+ assert(Itr != Builder.FuncInfo.StatepointStackSlots.end() &&
+ "value spilled to the unknown stack slot");
+
+ // This is one of our dedicated lowering slots
+ const int Offset =
+ std::distance(Builder.FuncInfo.StatepointStackSlots.begin(), Itr);
+ if (Builder.StatepointLowering.isStackSlotAllocated(Offset)) {
+ // stack slot already assigned to someone else, can't use it!
+ // TODO: currently we reserve space for gc arguments after doing
+ // normal allocation for deopt arguments. We should reserve for
+ // _all_ deopt and gc arguments, then start allocating. This
+ // will prevent some moves being inserted when vm state changes,
+ // but gc state doesn't between two calls.
+ return;
}
+ // Reserve this stack slot
+ Builder.StatepointLowering.reserveStackSlot(Offset);
- // TODO: handle case where a reloaded value flows through a phi to
- // another safepoint. e.g.
- // bb1:
- // a' = relocated...
- // bb2: % pred: bb1, bb3, bb4, etc.
- // a_phi = phi(a', ...)
- // statepoint ... a_phi
- // NOTE: This will require reasoning about cross basic block values. This is
- // decidedly non trivial and this might not be the right place to do it. We
- // don't really have the information we need here...
-
- // TODO: handle simple updates. If a value is modified and the original
- // value is no longer live, it would be nice to put the modified value in the
- // same slot. This allows folding of the memory accesses for some
- // instructions types (like an increment).
- // statepoint (i)
- // i1 = i+1
- // statepoint (i1)
+ // Cache this slot so we find it when going through the normal
+ // assignment loop.
+ SDValue Loc = Builder.DAG.getTargetFrameIndex(*Index, Incoming.getValueType());
+ Builder.StatepointLowering.setLocation(Incoming, Loc);
}
/// Remove any duplicate (as SDValues) from the derived pointer pairs. This
@@ -319,8 +372,7 @@ static void getIncomingStatepointGCValues(
SmallVectorImpl<const Value *> &Bases, SmallVectorImpl<const Value *> &Ptrs,
SmallVectorImpl<const Value *> &Relocs, ImmutableStatepoint StatepointSite,
SelectionDAGBuilder &Builder) {
- for (GCRelocateOperands relocateOpers :
- StatepointSite.getRelocates(StatepointSite)) {
+ for (GCRelocateOperands relocateOpers : StatepointSite.getRelocates()) {
Relocs.push_back(relocateOpers.getUnderlyingCallSite().getInstruction());
Bases.push_back(relocateOpers.getBasePtr());
Ptrs.push_back(relocateOpers.getDerivedPtr());
@@ -458,15 +510,11 @@ static void lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
// doesn't change semantics at all. It is important for performance that we
// reserve slots for both deopt and gc values before lowering either.
for (const Value *V : StatepointSite.vm_state_args()) {
- SDValue Incoming = Builder.getValue(V);
- reservePreviousStackSlotForValue(Incoming, Builder);
+ reservePreviousStackSlotForValue(V, Builder);
}
for (unsigned i = 0; i < Bases.size(); ++i) {
- const Value *Base = Bases[i];
- reservePreviousStackSlotForValue(Builder.getValue(Base), Builder);
-
- const Value *Ptr = Ptrs[i];
- reservePreviousStackSlotForValue(Builder.getValue(Ptr), Builder);
+ reservePreviousStackSlotForValue(Bases[i], Builder);
+ reservePreviousStackSlotForValue(Ptrs[i], Builder);
}
// First, prefix the list with the number of unique values to be
@@ -524,8 +572,7 @@ static void lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
FunctionLoweringInfo::StatepointSpilledValueMapTy &SpillMap =
Builder.FuncInfo.StatepointRelocatedValues[StatepointInstr];
- for (GCRelocateOperands RelocateOpers :
- StatepointSite.getRelocates(StatepointSite)) {
+ for (GCRelocateOperands RelocateOpers : StatepointSite.getRelocates()) {
const Value *V = RelocateOpers.getDerivedPtr();
SDValue SDV = Builder.getValue(V);
SDValue Loc = Builder.StatepointLowering.getLocation(SDV);
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 9daf2a50ad8f..c70c3a270403 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2671,8 +2671,9 @@ SDValue TargetLowering::BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
// TODO: For UDIV use SRL instead of SRA.
SDValue Amt =
DAG.getConstant(ShAmt, dl, getShiftAmountTy(Op1.getValueType()));
- Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, false, false,
- true);
+ SDNodeFlags Flags;
+ Flags.setExact(true);
+ Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, &Flags);
d = d.ashr(ShAmt);
}
diff --git a/lib/CodeGen/ShadowStackGCLowering.cpp b/lib/CodeGen/ShadowStackGCLowering.cpp
index 7c0b2bb45698..d60e5f9ba099 100644
--- a/lib/CodeGen/ShadowStackGCLowering.cpp
+++ b/lib/CodeGen/ShadowStackGCLowering.cpp
@@ -59,7 +59,7 @@ private:
Type *Ty, Value *BasePtr, int Idx1, int Idx2,
const char *Name);
};
-}
+} // namespace
INITIALIZE_PASS_BEGIN(ShadowStackGCLowering, "shadow-stack-gc-lowering",
"Shadow Stack GC Lowering", false, false)
@@ -144,10 +144,14 @@ public:
BasicBlock *CleanupBB = BasicBlock::Create(C, CleanupBBName, &F);
Type *ExnTy =
StructType::get(Type::getInt8PtrTy(C), Type::getInt32Ty(C), nullptr);
- Constant *PersFn = F.getParent()->getOrInsertFunction(
- "__gcc_personality_v0", FunctionType::get(Type::getInt32Ty(C), true));
+ if (!F.hasPersonalityFn()) {
+ Constant *PersFn = F.getParent()->getOrInsertFunction(
+ "__gcc_personality_v0",
+ FunctionType::get(Type::getInt32Ty(C), true));
+ F.setPersonalityFn(PersFn);
+ }
LandingPadInst *LPad =
- LandingPadInst::Create(ExnTy, PersFn, 1, "cleanup.lpad", CleanupBB);
+ LandingPadInst::Create(ExnTy, 1, "cleanup.lpad", CleanupBB);
LPad->setCleanup(true);
ResumeInst *RI = ResumeInst::Create(LPad, CleanupBB);
@@ -185,7 +189,7 @@ public:
}
}
};
-}
+} // namespace
Constant *ShadowStackGCLowering::GetFrameMap(Function &F) {
diff --git a/lib/CodeGen/SjLjEHPrepare.cpp b/lib/CodeGen/SjLjEHPrepare.cpp
index 42d277ebed0f..116fd5be0337 100644
--- a/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/lib/CodeGen/SjLjEHPrepare.cpp
@@ -227,7 +227,7 @@ Value *SjLjEHPrepare::setupFunctionContext(Function &F,
// Personality function
IRBuilder<> Builder(EntryBB->getTerminator());
if (!PersonalityFn)
- PersonalityFn = LPads[0]->getPersonalityFn();
+ PersonalityFn = F.getPersonalityFn();
Value *PersonalityFieldPtr = Builder.CreateConstGEP2_32(
FunctionContextTy, FuncCtx, 0, 3, "pers_fn_gep");
Builder.CreateStore(
diff --git a/lib/CodeGen/Spiller.h b/lib/CodeGen/Spiller.h
index 08f99ec78adc..b1019c1affd7 100644
--- a/lib/CodeGen/Spiller.h
+++ b/lib/CodeGen/Spiller.h
@@ -37,6 +37,6 @@ namespace llvm {
MachineFunction &mf,
VirtRegMap &vrm);
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/SplitKit.h b/lib/CodeGen/SplitKit.h
index a0627634a822..4eaf03ef2e63 100644
--- a/lib/CodeGen/SplitKit.h
+++ b/lib/CodeGen/SplitKit.h
@@ -466,6 +466,6 @@ public:
unsigned IntvOut, SlotIndex EnterAfter);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/CodeGen/StatepointExampleGC.cpp b/lib/CodeGen/StatepointExampleGC.cpp
index 95dfd75018c1..b9523e55b0c3 100644
--- a/lib/CodeGen/StatepointExampleGC.cpp
+++ b/lib/CodeGen/StatepointExampleGC.cpp
@@ -45,7 +45,7 @@ public:
return (1 == PT->getAddressSpace());
}
};
-}
+} // namespace
static GCRegistry::Add<StatepointGC> X("statepoint-example",
"an example strategy for statepoint");
diff --git a/lib/CodeGen/TailDuplication.cpp b/lib/CodeGen/TailDuplication.cpp
index 23f41c8dd4bd..164badd29381 100644
--- a/lib/CodeGen/TailDuplication.cpp
+++ b/lib/CodeGen/TailDuplication.cpp
@@ -125,7 +125,7 @@ namespace {
};
char TailDuplicatePass::ID = 0;
-}
+} // namespace
char &llvm::TailDuplicateID = TailDuplicatePass::ID;
diff --git a/lib/CodeGen/TargetInstrInfo.cpp b/lib/CodeGen/TargetInstrInfo.cpp
index c809087d3da4..97ca0253d376 100644
--- a/lib/CodeGen/TargetInstrInfo.cpp
+++ b/lib/CodeGen/TargetInstrInfo.cpp
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
+#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
@@ -219,9 +220,8 @@ TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
return !isPredicated(MI);
}
-
-bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const {
+bool TargetInstrInfo::PredicateInstruction(
+ MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
bool MadeChange = false;
assert(!MI->isBundle() &&
@@ -802,9 +802,10 @@ getInstrLatency(const InstrItineraryData *ItinData,
return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}
-bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
+bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr *DefMI,
unsigned DefIdx) const {
+ const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
if (!ItinData || ItinData->isEmpty())
return false;
diff --git a/lib/CodeGen/UnreachableBlockElim.cpp b/lib/CodeGen/UnreachableBlockElim.cpp
index d393e103104d..5c54cdbc1d5f 100644
--- a/lib/CodeGen/UnreachableBlockElim.cpp
+++ b/lib/CodeGen/UnreachableBlockElim.cpp
@@ -51,7 +51,7 @@ namespace {
AU.addPreserved<DominatorTreeWrapperPass>();
}
};
-}
+} // namespace
char UnreachableBlockElim::ID = 0;
INITIALIZE_PASS(UnreachableBlockElim, "unreachableblockelim",
"Remove unreachable blocks from the CFG", false, false)
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index 32d5100f8495..2912bdd63426 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -167,6 +167,7 @@ class VirtRegRewriter : public MachineFunctionPass {
void rewrite();
void addMBBLiveIns();
+ bool readsUndefSubreg(const MachineOperand &MO) const;
public:
static char ID;
VirtRegRewriter() : MachineFunctionPass(ID) {}
@@ -288,6 +289,31 @@ void VirtRegRewriter::addMBBLiveIns() {
MBB.sortUniqueLiveIns();
}
+/// Returns true if the given machine operand \p MO only reads undefined lanes.
+/// The function only works for use operands with a subregister set.
+bool VirtRegRewriter::readsUndefSubreg(const MachineOperand &MO) const {
+ // Shortcut if the operand is already marked undef.
+ if (MO.isUndef())
+ return true;
+
+ unsigned Reg = MO.getReg();
+ const LiveInterval &LI = LIS->getInterval(Reg);
+ const MachineInstr &MI = *MO.getParent();
+ SlotIndex BaseIndex = LIS->getInstructionIndex(&MI);
+ // This code is only meant to handle reading undefined subregisters which
+ // we couldn't properly detect before.
+ assert(LI.liveAt(BaseIndex) &&
+ "Reads of completely dead register should be marked undef already");
+ unsigned SubRegIdx = MO.getSubReg();
+ unsigned UseMask = TRI->getSubRegIndexLaneMask(SubRegIdx);
+ // See if any of the relevant subregister liveranges is defined at this point.
+ for (const LiveInterval::SubRange &SR : LI.subranges()) {
+ if ((SR.LaneMask & UseMask) != 0 && SR.liveAt(BaseIndex))
+ return false;
+ }
+ return true;
+}
+
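
Editorial aside: readsUndefSubreg reduces to a bitmask-overlap query against the live subranges. Here is the same logic in isolation, with a toy subrange list standing in for LiveInterval:

#include <cassert>
#include <cstdint>
#include <vector>

struct ToySubRange {
  uint32_t LaneMask; // which lanes this subrange describes
  bool LiveHere;     // liveness at the instruction in question
};

// A subregister read sees only undefined lanes when no live subrange
// overlaps the lanes the use actually touches.
static bool readsUndefLanes(uint32_t UseMask,
                            const std::vector<ToySubRange> &Ranges) {
  for (const ToySubRange &SR : Ranges)
    if ((SR.LaneMask & UseMask) != 0 && SR.LiveHere)
      return false; // a relevant lane is defined here
  return true;
}

int main() {
  std::vector<ToySubRange> Ranges = {{0x3, true}, {0xC, false}};
  assert(!readsUndefLanes(0x1, Ranges)); // lane 0 is live: defined read
  assert(readsUndefLanes(0x4, Ranges));  // lanes 2-3 are dead: undef read
  return 0;
}
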
void VirtRegRewriter::rewrite() {
bool NoSubRegLiveness = !MRI->subRegLivenessEnabled();
SmallVector<unsigned, 8> SuperDeads;
@@ -367,32 +393,51 @@ void VirtRegRewriter::rewrite() {
assert(!MRI->isReserved(PhysReg) && "Reserved register assignment");
// Preserve semantics of sub-register operands.
- if (MO.getSubReg()) {
- // A virtual register kill refers to the whole register, so we may
- // have to add <imp-use,kill> operands for the super-register. A
- // partial redef always kills and redefines the super-register.
- if (NoSubRegLiveness && MO.readsReg()
- && (MO.isDef() || MO.isKill()))
- SuperKills.push_back(PhysReg);
-
- if (MO.isDef()) {
- // The <def,undef> flag only makes sense for sub-register defs, and
- // we are substituting a full physreg. An <imp-use,kill> operand
- // from the SuperKills list will represent the partial read of the
- // super-register.
- MO.setIsUndef(false);
-
- // Also add implicit defs for the super-register.
- if (NoSubRegLiveness) {
+ unsigned SubReg = MO.getSubReg();
+ if (SubReg != 0) {
+ if (NoSubRegLiveness) {
+ // A virtual register kill refers to the whole register, so we may
+ // have to add <imp-use,kill> operands for the super-register. A
+ // partial redef always kills and redefines the super-register.
+ if (MO.readsReg() && (MO.isDef() || MO.isKill()))
+ SuperKills.push_back(PhysReg);
+
+ if (MO.isDef()) {
+ // Also add implicit defs for the super-register.
if (MO.isDead())
SuperDeads.push_back(PhysReg);
else
SuperDefs.push_back(PhysReg);
}
+ } else {
+ if (MO.isUse()) {
+ if (readsUndefSubreg(MO))
+ // We need to add an <undef> flag if the subregister is
+ // completely undefined (and we are not adding super-register
+ // defs).
+ MO.setIsUndef(true);
+ } else if (!MO.isDead()) {
+ assert(MO.isDef());
+            // Things get tricky when we run out of lane mask bits and
+            // merge multiple lanes into the overflow bit: in this case
+            // our subregister liveness tracking isn't precise and we
+            // can't know which subregister parts are undefined, so fall
+            // back to the implicit super-register def.
+ unsigned LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
+ if (TargetRegisterInfo::isImpreciseLaneMask(LaneMask))
+ SuperDefs.push_back(PhysReg);
+ }
}
+ // The <def,undef> flag only makes sense for sub-register defs, and
+ // we are substituting a full physreg. An <imp-use,kill> operand
+ // from the SuperKills list will represent the partial read of the
+ // super-register.
+ if (MO.isDef())
+ MO.setIsUndef(false);
+
// PhysReg operands cannot have subregister indexes.
- PhysReg = TRI->getSubReg(PhysReg, MO.getSubReg());
+ PhysReg = TRI->getSubReg(PhysReg, SubReg);
assert(PhysReg && "Invalid SubReg for physical register");
MO.setSubReg(0);
}
diff --git a/lib/CodeGen/WinEHPrepare.cpp b/lib/CodeGen/WinEHPrepare.cpp
index c2b3d84ca363..8c932cfc6b37 100644
--- a/lib/CodeGen/WinEHPrepare.cpp
+++ b/lib/CodeGen/WinEHPrepare.cpp
@@ -24,6 +24,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/LibCallSemantics.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
@@ -110,7 +111,7 @@ private:
bool outlineHandler(ActionHandler *Action, Function *SrcFn,
LandingPadInst *LPad, BasicBlock *StartBB,
FrameVarInfoMap &VarInfo);
- void addStubInvokeToHandlerIfNeeded(Function *Handler, Value *PersonalityFn);
+ void addStubInvokeToHandlerIfNeeded(Function *Handler);
void mapLandingPadBlocks(LandingPadInst *LPad, LandingPadActions &Actions);
CatchHandler *findCatchHandler(BasicBlock *BB, BasicBlock *&NextBB,
@@ -124,6 +125,7 @@ private:
// All fields are reset by runOnFunction.
DominatorTree *DT = nullptr;
+ const TargetLibraryInfo *LibInfo = nullptr;
EHPersonality Personality = EHPersonality::Unknown;
CatchHandlerMapTy CatchHandlerMap;
CleanupHandlerMapTy CleanupHandlerMap;
@@ -377,13 +379,14 @@ bool WinEHPrepare::runOnFunction(Function &Fn) {
return false;
// Classify the personality to see what kind of preparation we need.
- Personality = classifyEHPersonality(LPads.back()->getPersonalityFn());
+ Personality = classifyEHPersonality(Fn.getPersonalityFn());
// Do nothing if this is not an MSVC personality.
if (!isMSVCEHPersonality(Personality))
return false;
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
// If there were any landing pads, prepareExceptionHandlers will make changes.
prepareExceptionHandlers(Fn, LPads);
@@ -394,6 +397,7 @@ bool WinEHPrepare::doFinalization(Module &M) { return false; }
void WinEHPrepare::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
}
static bool isSelectorDispatch(BasicBlock *BB, BasicBlock *&CatchHandler,
@@ -1016,10 +1020,17 @@ bool WinEHPrepare::prepareExceptionHandlers(
Builder.CreateCall(FrameEscapeFn, AllocasToEscape);
if (SEHExceptionCodeSlot) {
- if (SEHExceptionCodeSlot->hasNUses(0))
- SEHExceptionCodeSlot->eraseFromParent();
- else if (isAllocaPromotable(SEHExceptionCodeSlot))
+ if (isAllocaPromotable(SEHExceptionCodeSlot)) {
+ SmallPtrSet<BasicBlock *, 4> UserBlocks;
+ for (User *U : SEHExceptionCodeSlot->users()) {
+ if (auto *Inst = dyn_cast<Instruction>(U))
+ UserBlocks.insert(Inst->getParent());
+ }
PromoteMemToReg(SEHExceptionCodeSlot, *DT);
+ // After the promotion, kill off dead instructions.
+ for (BasicBlock *BB : UserBlocks)
+ SimplifyInstructionsInBlock(BB, LibInfo);
+ }
}
// Clean up the handler action maps we created for this function
@@ -1029,6 +1040,7 @@ bool WinEHPrepare::prepareExceptionHandlers(
CleanupHandlerMap.clear();
HandlerToParentFP.clear();
DT = nullptr;
+ LibInfo = nullptr;
SEHExceptionCodeSlot = nullptr;
EHBlocks.clear();
NormalBlocks.clear();
@@ -1143,7 +1155,6 @@ void WinEHPrepare::completeNestedLandingPad(Function *ParentFn,
++II;
// The instruction after the landing pad should now be a call to eh.actions.
const Instruction *Recover = II;
- assert(match(Recover, m_Intrinsic<Intrinsic::eh_actions>()));
const IntrinsicInst *EHActions = cast<IntrinsicInst>(Recover);
// Remap the return target in the nested handler.
@@ -1254,8 +1265,7 @@ static bool isCatchBlock(BasicBlock *BB) {
return false;
}
-static BasicBlock *createStubLandingPad(Function *Handler,
- Value *PersonalityFn) {
+static BasicBlock *createStubLandingPad(Function *Handler) {
// FIXME: Finish this!
LLVMContext &Context = Handler->getContext();
BasicBlock *StubBB = BasicBlock::Create(Context, "stub");
@@ -1264,7 +1274,7 @@ static BasicBlock *createStubLandingPad(Function *Handler,
LandingPadInst *LPad = Builder.CreateLandingPad(
llvm::StructType::get(Type::getInt8PtrTy(Context),
Type::getInt32Ty(Context), nullptr),
- PersonalityFn, 0);
+ 0);
// Insert a call to llvm.eh.actions so that we don't try to outline this lpad.
Function *ActionIntrin =
Intrinsic::getDeclaration(Handler->getParent(), Intrinsic::eh_actions);
@@ -1279,8 +1289,7 @@ static BasicBlock *createStubLandingPad(Function *Handler,
// landing pad if none is found. The code that generates the .xdata tables for
// the handler needs at least one landing pad to identify the parent function's
// personality.
-void WinEHPrepare::addStubInvokeToHandlerIfNeeded(Function *Handler,
- Value *PersonalityFn) {
+void WinEHPrepare::addStubInvokeToHandlerIfNeeded(Function *Handler) {
ReturnInst *Ret = nullptr;
UnreachableInst *Unreached = nullptr;
for (BasicBlock &BB : *Handler) {
@@ -1312,7 +1321,7 @@ void WinEHPrepare::addStubInvokeToHandlerIfNeeded(Function *Handler,
// parent block. We want to replace that with an invoke call, so we can
// erase it now.
OldRetBB->getTerminator()->eraseFromParent();
- BasicBlock *StubLandingPad = createStubLandingPad(Handler, PersonalityFn);
+ BasicBlock *StubLandingPad = createStubLandingPad(Handler);
Function *F =
Intrinsic::getDeclaration(Handler->getParent(), Intrinsic::donothing);
InvokeInst::Create(F, NewRetBB, StubLandingPad, None, "", OldRetBB);
@@ -1368,6 +1377,7 @@ bool WinEHPrepare::outlineHandler(ActionHandler *Action, Function *SrcFn,
Handler = createHandlerFunc(Type::getVoidTy(Context),
SrcFn->getName() + ".cleanup", M, ParentFP);
}
+ Handler->setPersonalityFn(SrcFn->getPersonalityFn());
HandlerToParentFP[Handler] = ParentFP;
Handler->addFnAttr("wineh-parent", SrcFn->getName());
BasicBlock *Entry = &Handler->getEntryBlock();
@@ -1445,7 +1455,7 @@ bool WinEHPrepare::outlineHandler(ActionHandler *Action, Function *SrcFn,
ClonedEntryBB->eraseFromParent();
// Make sure we can identify the handler's personality later.
- addStubInvokeToHandlerIfNeeded(Handler, LPad->getPersonalityFn());
+ addStubInvokeToHandlerIfNeeded(Handler);
if (auto *CatchAction = dyn_cast<CatchHandler>(Action)) {
WinEHCatchDirector *CatchDirector =
@@ -2286,7 +2296,7 @@ void WinEHPrepare::findCleanupHandlers(LandingPadActions &Actions,
// value for this block but the value is a nullptr. This means that
// we have previously analyzed the block and determined that it did
// not contain any cleanup code. Based on the earlier analysis, we
- // know the the block must end in either an unconditional branch, a
+ // know the block must end in either an unconditional branch, a
// resume or a conditional branch that is predicated on a comparison
// with a selector. Either the resume or the selector dispatch
// would terminate the search for cleanup code, so the unconditional
@@ -2454,6 +2464,8 @@ void WinEHPrepare::findCleanupHandlers(LandingPadActions &Actions,
void llvm::parseEHActions(
const IntrinsicInst *II,
SmallVectorImpl<std::unique_ptr<ActionHandler>> &Actions) {
+ assert(II->getIntrinsicID() == Intrinsic::eh_actions &&
+ "attempted to parse non eh.actions intrinsic");
for (unsigned I = 0, E = II->getNumArgOperands(); I != E;) {
uint64_t ActionKind =
cast<ConstantInt>(II->getArgOperand(I))->getZExtValue();
@@ -2506,7 +2518,7 @@ struct WinEHNumbering {
void calculateStateNumbers(const Function &F);
void findActionRootLPads(const Function &F);
};
-}
+} // namespace
void WinEHNumbering::createUnwindMapEntry(int ToState, ActionHandler *AH) {
WinEHUnwindMapEntry UME;
@@ -2766,7 +2778,6 @@ void WinEHNumbering::calculateStateNumbers(const Function &F) {
auto *ActionsCall = dyn_cast<IntrinsicInst>(LPI->getNextNode());
if (!ActionsCall)
continue;
- assert(ActionsCall->getIntrinsicID() == Intrinsic::eh_actions);
parseEHActions(ActionsCall, ActionList);
if (ActionList.empty())
continue;
diff --git a/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp b/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
index 8ae05432869a..fd33c7d54749 100644
--- a/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
+++ b/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
@@ -129,4 +129,4 @@ void DWARFAcceleratorTable::dump(raw_ostream &OS) const {
}
}
}
-}
+} // namespace llvm
diff --git a/lib/DebugInfo/DWARF/DWARFContext.cpp b/lib/DebugInfo/DWARF/DWARFContext.cpp
index baab3873b915..32654f830f07 100644
--- a/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -674,7 +674,7 @@ DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj,
uint64_t SymAddr = 0;
uint64_t SectionLoadAddress = 0;
object::symbol_iterator Sym = Reloc.getSymbol();
- object::section_iterator RSec = Reloc.getSection();
+ object::section_iterator RSec = Obj.section_end();
// First calculate the address of the symbol or section as it appears
    // in the object file
@@ -682,8 +682,13 @@ DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj,
Sym->getAddress(SymAddr);
// Also remember what section this symbol is in for later
Sym->getSection(RSec);
- } else if (RSec != Obj.section_end())
+ } else if (auto *MObj = dyn_cast<MachOObjectFile>(&Obj)) {
+ // MachO also has relocations that point to sections and
+ // scattered relocations.
+ // FIXME: We are not handling scattered relocations, do we have to?
+ RSec = MObj->getRelocationSection(Reloc.getRawDataRefImpl());
SymAddr = RSec->getAddress();
+ }
// If we are given load addresses for the sections, we need to adjust:
// SymAddr = (Address of Symbol Or Section in File) -
diff --git a/lib/DebugInfo/DWARF/DWARFFormValue.cpp b/lib/DebugInfo/DWARF/DWARFFormValue.cpp
index 53a676efaf3f..48e1d55be5f7 100644
--- a/lib/DebugInfo/DWARF/DWARFFormValue.cpp
+++ b/lib/DebugInfo/DWARF/DWARFFormValue.cpp
@@ -61,7 +61,7 @@ ArrayRef<uint8_t> makeFixedFormSizesArrayRef() {
};
return makeArrayRef(sizes);
}
-}
+} // namespace
ArrayRef<uint8_t> DWARFFormValue::getFixedFormSizes(uint8_t AddrSize,
uint16_t Version) {
diff --git a/lib/DebugInfo/DWARF/SyntaxHighlighting.h b/lib/DebugInfo/DWARF/SyntaxHighlighting.h
index 946a31308aa1..84afd37c540a 100644
--- a/lib/DebugInfo/DWARF/SyntaxHighlighting.h
+++ b/lib/DebugInfo/DWARF/SyntaxHighlighting.h
@@ -32,8 +32,8 @@ public:
llvm::raw_ostream& get() { return OS; }
operator llvm::raw_ostream& () { return OS; }
};
-}
-}
-}
+} // namespace syntax
+} // namespace dwarf
+} // namespace llvm
#endif
diff --git a/lib/DebugInfo/PDB/CMakeLists.txt b/lib/DebugInfo/PDB/CMakeLists.txt
index 68d3402c5603..1645a95aac36 100644
--- a/lib/DebugInfo/PDB/CMakeLists.txt
+++ b/lib/DebugInfo/PDB/CMakeLists.txt
@@ -9,7 +9,7 @@ if(HAVE_DIA_SDK)
if (CMAKE_SIZEOF_VOID_P EQUAL 8)
set(LIBPDB_LINK_FOLDERS "${LIBPDB_LINK_FOLDERS}\\amd64")
endif()
- set(LIBPDB_ADDITIONAL_LIBRARIES "${LIBPDB_LINK_FOLDERS}\\diaguids.lib")
+ file(TO_CMAKE_PATH "${LIBPDB_LINK_FOLDERS}\\diaguids.lib" LIBPDB_ADDITIONAL_LIBRARIES)
add_pdb_impl_folder(DIA
DIA/DIADataStream.cpp
diff --git a/lib/DebugInfo/PDB/PDBSymbolFunc.cpp b/lib/DebugInfo/PDB/PDBSymbolFunc.cpp
index 0aff327366cb..8f56de804964 100644
--- a/lib/DebugInfo/PDB/PDBSymbolFunc.cpp
+++ b/lib/DebugInfo/PDB/PDBSymbolFunc.cpp
@@ -80,7 +80,7 @@ private:
ArgListType Args;
ArgListType::const_iterator CurIter;
};
-}
+} // namespace
PDBSymbolFunc::PDBSymbolFunc(const IPDBSession &PDBSession,
std::unique_ptr<IPDBRawSymbol> Symbol)
diff --git a/lib/DebugInfo/PDB/PDBSymbolTypeFunctionSig.cpp b/lib/DebugInfo/PDB/PDBSymbolTypeFunctionSig.cpp
index af3563f891f8..fcee1825f7d7 100644
--- a/lib/DebugInfo/PDB/PDBSymbolTypeFunctionSig.cpp
+++ b/lib/DebugInfo/PDB/PDBSymbolTypeFunctionSig.cpp
@@ -63,7 +63,7 @@ private:
const IPDBSession &Session;
std::unique_ptr<ArgEnumeratorType> Enumerator;
};
-}
+} // namespace
PDBSymbolTypeFunctionSig::PDBSymbolTypeFunctionSig(
const IPDBSession &PDBSession, std::unique_ptr<IPDBRawSymbol> Symbol)
diff --git a/lib/ExecutionEngine/CMakeLists.txt b/lib/ExecutionEngine/CMakeLists.txt
index e8a18d3e5af4..2d9337bbefd2 100644
--- a/lib/ExecutionEngine/CMakeLists.txt
+++ b/lib/ExecutionEngine/CMakeLists.txt
@@ -9,6 +9,9 @@ add_llvm_library(LLVMExecutionEngine
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/ExecutionEngine
+
+ DEPENDS
+ intrinsics_gen
)
add_subdirectory(Interpreter)
diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp
index 9e71b108280b..94e809061c71 100644
--- a/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -153,6 +153,14 @@ Function *ExecutionEngine::FindFunctionNamed(const char *FnName) {
return nullptr;
}
+GlobalVariable *ExecutionEngine::FindGlobalVariableNamed(const char *Name, bool AllowInternal) {
+ for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
+    GlobalVariable *GV = Modules[i]->getGlobalVariable(Name, AllowInternal);
+ if (GV && !GV->isDeclaration())
+ return GV;
+ }
+ return nullptr;
+}
uint64_t ExecutionEngineState::RemoveMapping(StringRef Name) {
GlobalAddressMapTy::iterator I = GlobalAddressMap.find(Name);
@@ -376,7 +384,7 @@ void ExecutionEngine::runStaticConstructorsDestructors(Module &module,
// Execute the ctor/dtor function!
if (Function *F = dyn_cast<Function>(FP))
- runFunction(F, std::vector<GenericValue>());
+ runFunction(F, None);
// FIXME: It is marginally lame that we just do nothing here if we see an
// entry we don't recognize. It might not be unreasonable for the verifier
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
index 39a8027005f8..dbfa37e2b0da 100644
--- a/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -2073,8 +2073,7 @@ GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
//===----------------------------------------------------------------------===//
// callFunction - Execute the specified function...
//
-void Interpreter::callFunction(Function *F,
- const std::vector<GenericValue> &ArgVals) {
+void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
assert((ECStack.empty() || !ECStack.back().Caller.getInstruction() ||
ECStack.back().Caller.arg_size() == ArgVals.size()) &&
"Incorrect number of arguments passed into function call!");
diff --git a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
index e2fe0651c7e7..9b44042d6144 100644
--- a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -49,8 +49,7 @@ using namespace llvm;
static ManagedStatic<sys::Mutex> FunctionsLock;
-typedef GenericValue (*ExFunc)(FunctionType *,
- const std::vector<GenericValue> &);
+typedef GenericValue (*ExFunc)(FunctionType *, ArrayRef<GenericValue>);
static ManagedStatic<std::map<const Function *, ExFunc> > ExportedFunctions;
static ManagedStatic<std::map<std::string, ExFunc> > FuncNames;
@@ -178,8 +177,7 @@ static void *ffiValueFor(Type *Ty, const GenericValue &AV,
return NULL;
}
-static bool ffiInvoke(RawFunc Fn, Function *F,
- const std::vector<GenericValue> &ArgVals,
+static bool ffiInvoke(RawFunc Fn, Function *F, ArrayRef<GenericValue> ArgVals,
const DataLayout *TD, GenericValue &Result) {
ffi_cif cif;
FunctionType *FTy = F->getFunctionType();
@@ -245,7 +243,7 @@ static bool ffiInvoke(RawFunc Fn, Function *F,
#endif // USE_LIBFFI
GenericValue Interpreter::callExternalFunction(Function *F,
- const std::vector<GenericValue> &ArgVals) {
+ ArrayRef<GenericValue> ArgVals) {
TheInterpreter = this;
unique_lock<sys::Mutex> Guard(*FunctionsLock);
@@ -298,9 +296,8 @@ GenericValue Interpreter::callExternalFunction(Function *F,
//
// void atexit(Function*)
-static
-GenericValue lle_X_atexit(FunctionType *FT,
- const std::vector<GenericValue> &Args) {
+static GenericValue lle_X_atexit(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
assert(Args.size() == 1);
TheInterpreter->addAtExitHandler((Function*)GVTOP(Args[0]));
GenericValue GV;
@@ -309,17 +306,13 @@ GenericValue lle_X_atexit(FunctionType *FT,
}
// void exit(int)
-static
-GenericValue lle_X_exit(FunctionType *FT,
- const std::vector<GenericValue> &Args) {
+static GenericValue lle_X_exit(FunctionType *FT, ArrayRef<GenericValue> Args) {
TheInterpreter->exitCalled(Args[0]);
return GenericValue();
}
// void abort(void)
-static
-GenericValue lle_X_abort(FunctionType *FT,
- const std::vector<GenericValue> &Args) {
+static GenericValue lle_X_abort(FunctionType *FT, ArrayRef<GenericValue> Args) {
//FIXME: should we report or raise here?
//report_fatal_error("Interpreted program raised SIGABRT");
raise (SIGABRT);
@@ -328,9 +321,8 @@ GenericValue lle_X_abort(FunctionType *FT,
// int sprintf(char *, const char *, ...) - a very rough implementation to make
// output useful.
-static
-GenericValue lle_X_sprintf(FunctionType *FT,
- const std::vector<GenericValue> &Args) {
+static GenericValue lle_X_sprintf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
char *OutputBuffer = (char *)GVTOP(Args[0]);
const char *FmtStr = (const char *)GVTOP(Args[1]);
unsigned ArgNo = 2;
@@ -411,9 +403,8 @@ GenericValue lle_X_sprintf(FunctionType *FT,
// int printf(const char *, ...) - a very rough implementation to make output
// useful.
-static
-GenericValue lle_X_printf(FunctionType *FT,
- const std::vector<GenericValue> &Args) {
+static GenericValue lle_X_printf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
char Buffer[10000];
std::vector<GenericValue> NewArgs;
NewArgs.push_back(PTOGV((void*)&Buffer[0]));
@@ -424,9 +415,8 @@ GenericValue lle_X_printf(FunctionType *FT,
}
// int sscanf(const char *format, ...);
-static
-GenericValue lle_X_sscanf(FunctionType *FT,
- const std::vector<GenericValue> &args) {
+static GenericValue lle_X_sscanf(FunctionType *FT,
+ ArrayRef<GenericValue> args) {
assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!");
char *Args[10];
@@ -440,9 +430,7 @@ GenericValue lle_X_sscanf(FunctionType *FT,
}
// int scanf(const char *format, ...);
-static
-GenericValue lle_X_scanf(FunctionType *FT,
- const std::vector<GenericValue> &args) {
+static GenericValue lle_X_scanf(FunctionType *FT, ArrayRef<GenericValue> args) {
assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!");
char *Args[10];
@@ -457,9 +445,8 @@ GenericValue lle_X_scanf(FunctionType *FT,
// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
// output useful.
-static
-GenericValue lle_X_fprintf(FunctionType *FT,
- const std::vector<GenericValue> &Args) {
+static GenericValue lle_X_fprintf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
assert(Args.size() >= 2);
char Buffer[10000];
std::vector<GenericValue> NewArgs;
@@ -472,7 +459,7 @@ GenericValue lle_X_fprintf(FunctionType *FT,
}
static GenericValue lle_X_memset(FunctionType *FT,
- const std::vector<GenericValue> &Args) {
+ ArrayRef<GenericValue> Args) {
int val = (int)Args[1].IntVal.getSExtValue();
size_t len = (size_t)Args[2].IntVal.getZExtValue();
memset((void *)GVTOP(Args[0]), val, len);
@@ -484,7 +471,7 @@ static GenericValue lle_X_memset(FunctionType *FT,
}
static GenericValue lle_X_memcpy(FunctionType *FT,
- const std::vector<GenericValue> &Args) {
+ ArrayRef<GenericValue> Args) {
memcpy(GVTOP(Args[0]), GVTOP(Args[1]),
(size_t)(Args[2].IntVal.getLimitedValue()));
diff --git a/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/lib/ExecutionEngine/Interpreter/Interpreter.cpp
index 8562981b629a..f103c09659aa 100644
--- a/lib/ExecutionEngine/Interpreter/Interpreter.cpp
+++ b/lib/ExecutionEngine/Interpreter/Interpreter.cpp
@@ -67,7 +67,7 @@ Interpreter::~Interpreter() {
void Interpreter::runAtExitHandlers () {
while (!AtExitHandlers.empty()) {
- callFunction(AtExitHandlers.back(), std::vector<GenericValue>());
+ callFunction(AtExitHandlers.back(), None);
AtExitHandlers.pop_back();
run();
}
@@ -75,9 +75,8 @@ void Interpreter::runAtExitHandlers () {
/// run - Start execution with the specified function and arguments.
///
-GenericValue
-Interpreter::runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues) {
+GenericValue Interpreter::runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) {
assert (F && "Function *F was null at entry to run()");
// Try extra hard not to pass extra args to a function that isn't
@@ -87,10 +86,9 @@ Interpreter::runFunction(Function *F,
// parameters than it is declared to take. This does not attempt to
// take into account gratuitous differences in declared types,
// though.
- std::vector<GenericValue> ActualArgs;
- const unsigned ArgCount = F->getFunctionType()->getNumParams();
- for (unsigned i = 0; i < ArgCount; ++i)
- ActualArgs.push_back(ArgValues[i]);
+ const size_t ArgCount = F->getFunctionType()->getNumParams();
+ ArrayRef<GenericValue> ActualArgs =
+ ArgValues.slice(0, std::min(ArgValues.size(), ArgCount));
// Set up the function call.
callFunction(F, ActualArgs);
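
The signature change that recurs through the ExecutionEngine files replaces `const std::vector<GenericValue> &` with `ArrayRef<GenericValue>`, a cheap non-owning view that binds to vectors, initializer lists, `None`, and slices alike; that is what makes the spellings `runFunction(F, None)` and `ArgValues.slice(0, ...)` above possible. A minimal sketch, assuming an LLVM tree is available for the ADT header; `sum` is just an illustrative function:

#include "llvm/ADT/ArrayRef.h"
#include <iostream>
#include <vector>

// Any contiguous sequence (or nothing at all) can be passed without copies.
static int sum(llvm::ArrayRef<int> Vals) {
  int Total = 0;
  for (int V : Vals)
    Total += V;
  return Total;
}

int main() {
  std::vector<int> V = {1, 2, 3, 4};
  std::cout << sum(V) << "\n";                                  // a vector: 10
  std::cout << sum(llvm::None) << "\n";                         // empty view: 0
  std::cout << sum(llvm::ArrayRef<int>(V).slice(0, 2)) << "\n"; // 1+2 = 3
}
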
diff --git a/lib/ExecutionEngine/Interpreter/Interpreter.h b/lib/ExecutionEngine/Interpreter/Interpreter.h
index 0dc0463903d4..f6cac580e26f 100644
--- a/lib/ExecutionEngine/Interpreter/Interpreter.h
+++ b/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -127,7 +127,7 @@ public:
/// run - Start execution with the specified function and arguments.
///
GenericValue runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues) override;
+ ArrayRef<GenericValue> ArgValues) override;
void *getPointerToNamedFunction(StringRef Name,
bool AbortOnFailure = true) override {
@@ -137,7 +137,7 @@ public:
// Methods used to execute code:
// Place a call on the stack
- void callFunction(Function *F, const std::vector<GenericValue> &ArgVals);
+ void callFunction(Function *F, ArrayRef<GenericValue> ArgVals);
void run(); // Execute instructions until nothing left to do
// Opcode Implementations
@@ -194,7 +194,7 @@ public:
}
GenericValue callExternalFunction(Function *F,
- const std::vector<GenericValue> &ArgVals);
+ ArrayRef<GenericValue> ArgVals);
void exitCalled(GenericValue GV);
void addAtExitHandler(Function *F) {
@@ -251,6 +251,6 @@ private: // Helper functions
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/ExecutionEngine/MCJIT/CMakeLists.txt b/lib/ExecutionEngine/MCJIT/CMakeLists.txt
index 2911a5077220..b1e2bc3d635c 100644
--- a/lib/ExecutionEngine/MCJIT/CMakeLists.txt
+++ b/lib/ExecutionEngine/MCJIT/CMakeLists.txt
@@ -1,3 +1,6 @@
add_llvm_library(LLVMMCJIT
MCJIT.cpp
+
+ DEPENDS
+ intrinsics_gen
)
diff --git a/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index 7e37afe2056e..87243e4221f4 100644
--- a/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -429,6 +429,19 @@ Function *MCJIT::FindFunctionNamedInModulePtrSet(const char *FnName,
return nullptr;
}
+GlobalVariable *MCJIT::FindGlobalVariableNamedInModulePtrSet(const char *Name,
+ bool AllowInternal,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E) {
+ for (; I != E; ++I) {
+ GlobalVariable *GV = (*I)->getGlobalVariable(Name, AllowInternal);
+ if (GV && !GV->isDeclaration())
+ return GV;
+ }
+ return nullptr;
+}
+
+
Function *MCJIT::FindFunctionNamed(const char *FnName) {
Function *F = FindFunctionNamedInModulePtrSet(
FnName, OwnedModules.begin_added(), OwnedModules.end_added());
@@ -441,8 +454,19 @@ Function *MCJIT::FindFunctionNamed(const char *FnName) {
return F;
}
-GenericValue MCJIT::runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues) {
+GlobalVariable *MCJIT::FindGlobalVariableNamed(const char *Name, bool AllowInternal) {
+ GlobalVariable *GV = FindGlobalVariableNamedInModulePtrSet(
+ Name, AllowInternal, OwnedModules.begin_added(), OwnedModules.end_added());
+ if (!GV)
+ GV = FindGlobalVariableNamedInModulePtrSet(Name, AllowInternal, OwnedModules.begin_loaded(),
+ OwnedModules.end_loaded());
+ if (!GV)
+ GV = FindGlobalVariableNamedInModulePtrSet(Name, AllowInternal, OwnedModules.begin_finalized(),
+ OwnedModules.end_finalized());
+ return GV;
+}
+
+GenericValue MCJIT::runFunction(Function *F, ArrayRef<GenericValue> ArgValues) {
assert(F && "Function *F was null at entry to run()");
void *FPtr = getPointerToFunction(F);
diff --git a/lib/ExecutionEngine/MCJIT/MCJIT.h b/lib/ExecutionEngine/MCJIT/MCJIT.h
index 59e99498f9a4..7fda1e0fed6e 100644
--- a/lib/ExecutionEngine/MCJIT/MCJIT.h
+++ b/lib/ExecutionEngine/MCJIT/MCJIT.h
@@ -200,6 +200,11 @@ class MCJIT : public ExecutionEngine {
ModulePtrSet::iterator I,
ModulePtrSet::iterator E);
+ GlobalVariable *FindGlobalVariableNamedInModulePtrSet(const char *Name,
+ bool AllowInternal,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E);
+
void runStaticConstructorsDestructorsInModulePtrSet(bool isDtors,
ModulePtrSet::iterator I,
ModulePtrSet::iterator E);
@@ -215,10 +220,15 @@ public:
void addArchive(object::OwningBinary<object::Archive> O) override;
bool removeModule(Module *M) override;
- /// FindFunctionNamed - Search all of the active modules to find the one that
+ /// FindFunctionNamed - Search all of the active modules to find the function that
   /// defines FnName. This is a very slow operation and shouldn't be used for
/// general code.
- Function *FindFunctionNamed(const char *FnName) override;
+ virtual Function *FindFunctionNamed(const char *FnName) override;
+
+ /// FindGlobalVariableNamed - Search all of the active modules to find the global variable
+  /// that defines Name. This is a very slow operation and shouldn't be used for
+ /// general code.
+ virtual GlobalVariable *FindGlobalVariableNamed(const char *Name, bool AllowInternal = false) override;
/// Sets the object manager that MCJIT should use to avoid compilation.
void setObjectCache(ObjectCache *manager) override;
@@ -251,7 +261,7 @@ public:
void *getPointerToFunction(Function *F) override;
GenericValue runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues) override;
+ ArrayRef<GenericValue> ArgValues) override;
/// getPointerToNamedFunction - This method returns the address of the
/// specified function by using the dlsym function call. As such it is only
@@ -325,6 +335,6 @@ protected:
bool CheckFunctionsOnly);
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/ExecutionEngine/Orc/CMakeLists.txt b/lib/ExecutionEngine/Orc/CMakeLists.txt
index 18f0441c466e..1da164237a67 100644
--- a/lib/ExecutionEngine/Orc/CMakeLists.txt
+++ b/lib/ExecutionEngine/Orc/CMakeLists.txt
@@ -6,4 +6,7 @@ add_llvm_library(LLVMOrcJIT
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/ExecutionEngine/Orc
+
+ DEPENDS
+ intrinsics_gen
)
diff --git a/lib/ExecutionEngine/Orc/IndirectionUtils.cpp b/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
index 4ed873031482..b439810ed330 100644
--- a/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
+++ b/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
@@ -30,8 +30,6 @@ Constant* createIRTypedAddress(FunctionType &FT, TargetAddress Addr) {
GlobalVariable* createImplPointer(PointerType &PT, Module &M,
const Twine &Name, Constant *Initializer) {
- if (!Initializer)
- Initializer = Constant::getNullValue(&PT);
auto IP = new GlobalVariable(M, &PT, false, GlobalValue::ExternalLinkage,
Initializer, Name, nullptr,
GlobalValue::NotThreadLocal, 0, true);
diff --git a/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp b/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp
index 48fd31e51a6d..b7a68e041c12 100644
--- a/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp
+++ b/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp
@@ -25,7 +25,7 @@ namespace orc {
GenericValue
OrcMCJITReplacement::runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues) {
+ ArrayRef<GenericValue> ArgValues) {
assert(F && "Function *F was null at entry to run()");
void *FPtr = getPointerToFunction(F);
diff --git a/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h b/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
index 4023344d2f3d..eb39798cc740 100644
--- a/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
+++ b/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h
@@ -229,7 +229,7 @@ public:
}
GenericValue runFunction(Function *F,
- const std::vector<GenericValue> &ArgValues) override;
+ ArrayRef<GenericValue> ArgValues) override;
void setObjectCache(ObjectCache *NewCache) override {
CompileLayer.setObjectCache(NewCache);
diff --git a/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt b/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt
index e78408a3b6ae..182f98200fc1 100644
--- a/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt
+++ b/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt
@@ -5,4 +5,7 @@ add_llvm_library(LLVMRuntimeDyld
RuntimeDyldCOFF.cpp
RuntimeDyldELF.cpp
RuntimeDyldMachO.cpp
+
+ DEPENDS
+ intrinsics_gen
)
diff --git a/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp b/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp
index 2a5e4f83228b..044eee43c9e7 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp
@@ -41,8 +41,8 @@ RTDyldMemoryManager::~RTDyldMemoryManager() {}
#endif
#if HAVE_EHTABLE_SUPPORT
-extern "C" void __register_frame(void*);
-extern "C" void __deregister_frame(void*);
+extern "C" void __register_frame(void *);
+extern "C" void __deregister_frame(void *);
#else
// The building compiler does not have __(de)register_frame but
// it may be found at runtime in a dynamically-loaded library.
@@ -50,28 +50,28 @@ extern "C" void __deregister_frame(void*);
// but using the MingW runtime.
void __register_frame(void *p) {
static bool Searched = false;
- static void *rf = 0;
+ static void((*rf)(void *)) = 0;
if (!Searched) {
Searched = true;
- rf = llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
- "__register_frame");
+ *(void **)&rf =
+ llvm::sys::DynamicLibrary::SearchForAddressOfSymbol("__register_frame");
}
if (rf)
- ((void (*)(void *))rf)(p);
+ rf(p);
}
void __deregister_frame(void *p) {
static bool Searched = false;
- static void *df = 0;
+ static void((*df)(void *)) = 0;
if (!Searched) {
Searched = true;
- df = llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
- "__deregister_frame");
+ *(void **)&df = llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
+ "__deregister_frame");
}
if (df)
- ((void (*)(void *))df)(p);
+ df(p);
}
#endif
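
The `__register_frame` fallback above now keeps the looked-up symbol in a real function pointer and fills it by writing through a `void **` alias, since a plain value conversion from `void *` to a function pointer is not strictly portable; this is the idiom the POSIX dlsym documentation suggests. A standalone sketch; the library name `libm.so.6` is a Linux-specific assumption:

#include <cstdio>
#include <dlfcn.h>

int main() {
  void *Handle = dlopen("libm.so.6", RTLD_LAZY); // platform-specific name
  if (!Handle)
    return 1;
  double (*Cos)(double) = nullptr;
  // Write the void* result through a void** alias of the function pointer
  // instead of casting the value directly.
  *(void **)(&Cos) = dlsym(Handle, "cos");
  if (Cos)
    std::printf("cos(0) = %f\n", Cos(0.0));
  return dlclose(Handle);
}
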
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
index c8d3d22966de..9f80e5a87cd0 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
@@ -36,7 +36,7 @@ public:
return OwningBinary<ObjectFile>();
}
};
-}
+} // namespace
namespace llvm {
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
index 957571b092da..c8c25169ab0e 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -673,7 +673,7 @@ private:
return (S == MCDisassembler::Success);
}
};
-}
+} // namespace llvm
RuntimeDyldCheckerImpl::RuntimeDyldCheckerImpl(RuntimeDyld &RTDyld,
MCDisassembler *Disassembler,
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
index 69d2a7d6b668..a0a11188f5ca 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
@@ -72,6 +72,6 @@ private:
StubMap Stubs;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
index b4a34e8acf3e..967d7c07bc8a 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -519,7 +519,8 @@ void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
}
void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
- if (!StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
+ if (Arch == Triple::UnknownArch ||
+ !StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
IsMipsO32ABI = false;
IsMipsN64ABI = false;
return;
@@ -717,7 +718,7 @@ void RuntimeDyldELF::applyMIPS64Relocation(uint8_t *TargetPtr,
}
// Return the .TOC. section and offset.
-void RuntimeDyldELF::findPPC64TOCSection(const ObjectFile &Obj,
+void RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
ObjSectionToIDMap &LocalSections,
RelocationValueRef &Rel) {
// Set a default SectionID in case we do not find a TOC section below.
@@ -750,7 +751,7 @@ void RuntimeDyldELF::findPPC64TOCSection(const ObjectFile &Obj,
// Returns the sections and offset associated with the ODP entry referenced
// by Symbol.
-void RuntimeDyldELF::findOPDEntrySection(const ObjectFile &Obj,
+void RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
ObjSectionToIDMap &LocalSections,
RelocationValueRef &Rel) {
// Get the ELF symbol value (st_value) to compare with Relocation offset in
@@ -781,8 +782,10 @@ void RuntimeDyldELF::findOPDEntrySection(const ObjectFile &Obj,
uint64_t TargetSymbolOffset;
symbol_iterator TargetSymbol = i->getSymbol();
check(i->getOffset(TargetSymbolOffset));
- int64_t Addend;
- check(getELFRelocationAddend(*i, Addend));
+ ErrorOr<int64_t> AddendOrErr =
+ Obj.getRelocationAddend(i->getRawDataRefImpl());
+ Check(AddendOrErr.getError());
+ int64_t Addend = *AddendOrErr;
++i;
if (i == e)
@@ -1055,14 +1058,14 @@ void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID, uint64_t Offset
}
relocation_iterator RuntimeDyldELF::processRelocationRef(
- unsigned SectionID, relocation_iterator RelI,
- const ObjectFile &Obj,
- ObjSectionToIDMap &ObjSectionToID,
- StubMap &Stubs) {
+ unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
+ ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
+ const auto &Obj = cast<ELFObjectFileBase>(O);
uint64_t RelType;
Check(RelI->getType(RelType));
- int64_t Addend;
- Check(getELFRelocationAddend(*RelI, Addend));
+ int64_t Addend = 0;
+ if (Obj.hasRelocationAddend(RelI->getRawDataRefImpl()))
+ Addend = *Obj.getRelocationAddend(RelI->getRawDataRefImpl());
symbol_iterator Symbol = RelI->getSymbol();
// Obtain the symbol name which is referenced in the relocation
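
The addend hunks above replace the `getELFRelocationAddend(*i, Addend)` out-parameter style with `ErrorOr<int64_t>`, so the value and its error state travel in one object and the caller must check before dereferencing. A minimal sketch of the consumer side, assuming LLVM's Support headers; `getAddend` is a hypothetical stand-in:

#include "llvm/Support/ErrorOr.h"
#include <cerrno>
#include <cstdint>
#include <cstdio>

// The callee returns either a value or a std::error_code.
static llvm::ErrorOr<int64_t> getAddend(bool HasAddend) {
  if (!HasAddend)
    return std::error_code(EINVAL, std::generic_category());
  return int64_t(42);
}

int main() {
  llvm::ErrorOr<int64_t> AddendOrErr = getAddend(true);
  if (std::error_code EC = AddendOrErr.getError()) {
    std::fprintf(stderr, "error: %s\n", EC.message().c_str());
    return 1;
  }
  int64_t Addend = *AddendOrErr; // check first, then dereference
  std::printf("addend = %lld\n", (long long)Addend);
}
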
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
index 3a377a2e162c..1a2552deed95 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -87,10 +87,10 @@ class RuntimeDyldELF : public RuntimeDyldImpl {
void setMipsABI(const ObjectFile &Obj) override;
- void findPPC64TOCSection(const ObjectFile &Obj,
+ void findPPC64TOCSection(const ELFObjectFileBase &Obj,
ObjSectionToIDMap &LocalSections,
RelocationValueRef &Rel);
- void findOPDEntrySection(const ObjectFile &Obj,
+ void findOPDEntrySection(const ELFObjectFileBase &Obj,
ObjSectionToIDMap &LocalSections,
RelocationValueRef &Rel);
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
index d4a680d749a1..f7a4fcc7214f 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -39,7 +39,7 @@ public:
}
};
-}
+} // namespace
namespace llvm {
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
index 99fd6e333b47..5149d010a8c6 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -400,7 +400,7 @@ private:
addRelocationForSection(TargetRE, RE.SectionID);
}
};
-}
+} // namespace llvm
#undef DEBUG_TYPE
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
index 09e51f27da4f..8600763b8448 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
@@ -272,7 +272,7 @@ private:
}
};
-}
+} // namespace llvm
#undef DEBUG_TYPE
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
index dd454ae54f26..f36f940ffd5a 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
@@ -254,7 +254,7 @@ private:
}
};
-}
+} // namespace llvm
#undef DEBUG_TYPE
diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
index 4b3b01ba3c96..419b27a1da8b 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
@@ -131,7 +131,7 @@ private:
resolveRelocation(TargetRE, (uint64_t)Addr);
}
};
-}
+} // namespace llvm
#undef DEBUG_TYPE
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
index 0744fdf40157..bc35cb3986b3 100644
--- a/lib/IR/AsmWriter.cpp
+++ b/lib/IR/AsmWriter.cpp
@@ -67,7 +67,7 @@ struct OrderMap {
IDs[V].first = ID;
}
};
-}
+} // namespace
static void orderValue(const Value *V, OrderMap &OM) {
if (OM.lookup(V).first)
@@ -109,6 +109,10 @@ static OrderMap orderModule(const Module *M) {
if (!isa<GlobalValue>(F.getPrologueData()))
orderValue(F.getPrologueData(), OM);
+ if (F.hasPersonalityFn())
+ if (!isa<GlobalValue>(F.getPersonalityFn()))
+ orderValue(F.getPersonalityFn(), OM);
+
orderValue(&F, OM);
if (F.isDeclaration())
@@ -725,33 +729,33 @@ void SlotTracker::processModule() {
ST_DEBUG("begin processModule!\n");
// Add all of the unnamed global variables to the value table.
- for (Module::const_global_iterator I = TheModule->global_begin(),
- E = TheModule->global_end(); I != E; ++I) {
- if (!I->hasName())
- CreateModuleSlot(I);
+ for (const GlobalVariable &Var : TheModule->globals()) {
+ if (!Var.hasName())
+ CreateModuleSlot(&Var);
+ }
+
+ for (const GlobalAlias &A : TheModule->aliases()) {
+ if (!A.hasName())
+ CreateModuleSlot(&A);
}
// Add metadata used by named metadata.
- for (Module::const_named_metadata_iterator
- I = TheModule->named_metadata_begin(),
- E = TheModule->named_metadata_end(); I != E; ++I) {
- const NamedMDNode *NMD = I;
- for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
- CreateMetadataSlot(NMD->getOperand(i));
+ for (const NamedMDNode &NMD : TheModule->named_metadata()) {
+ for (unsigned i = 0, e = NMD.getNumOperands(); i != e; ++i)
+ CreateMetadataSlot(NMD.getOperand(i));
}
- for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
- I != E; ++I) {
- if (!I->hasName())
+ for (const Function &F : *TheModule) {
+ if (!F.hasName())
// Add all the unnamed functions to the table.
- CreateModuleSlot(I);
+ CreateModuleSlot(&F);
if (ShouldInitializeAllMetadata)
- processFunctionMetadata(*I);
+ processFunctionMetadata(F);
// Add all the function attributes to the table.
// FIXME: Add attributes of other objects?
- AttributeSet FnAttrs = I->getAttributes().getFnAttributes();
+ AttributeSet FnAttrs = F.getAttributes().getFnAttributes();
if (FnAttrs.hasAttributes(AttributeSet::FunctionIndex))
CreateAttributeSetSlot(FnAttrs);
}
@@ -2169,23 +2173,21 @@ void AssemblyWriter::printModule(const Module *M) {
// Output all globals.
if (!M->global_empty()) Out << '\n';
- for (Module::const_global_iterator I = M->global_begin(), E = M->global_end();
- I != E; ++I) {
- printGlobal(I); Out << '\n';
+ for (const GlobalVariable &GV : M->globals()) {
+ printGlobal(&GV); Out << '\n';
}
// Output all aliases.
if (!M->alias_empty()) Out << "\n";
- for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
- I != E; ++I)
- printAlias(I);
+ for (const GlobalAlias &GA : M->aliases())
+ printAlias(&GA);
// Output global use-lists.
printUseLists(nullptr);
// Output all of the functions.
- for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I)
- printFunction(I);
+ for (const Function &F : *M)
+ printFunction(&F);
assert(UseListOrders.empty() && "All use-lists should have been consumed");
// Output all attribute groups.
@@ -2197,9 +2199,8 @@ void AssemblyWriter::printModule(const Module *M) {
// Output named metadata.
if (!M->named_metadata_empty()) Out << '\n';
- for (Module::const_named_metadata_iterator I = M->named_metadata_begin(),
- E = M->named_metadata_end(); I != E; ++I)
- printNamedMDNode(I);
+ for (const NamedMDNode &Node : M->named_metadata())
+ printNamedMDNode(&Node);
// Output metadata.
if (!Machine.mdn_empty()) {
@@ -2364,13 +2365,9 @@ void AssemblyWriter::printAlias(const GlobalAlias *GA) {
if (GA->isMaterializable())
Out << "; Materializable\n";
- // Don't crash when dumping partially built GA
- if (!GA->hasName())
- Out << "<<nameless>> = ";
- else {
- PrintLLVMName(Out, GA);
- Out << " = ";
- }
+ WriteAsOperandInternal(Out, GA, &TypePrinter, &Machine, GA->getParent());
+ Out << " = ";
+
PrintLinkage(GA->getLinkage(), Out);
PrintVisibility(GA->getVisibility(), Out);
PrintDLLStorageClass(GA->getDLLStorageClass(), Out);
@@ -2547,6 +2544,10 @@ void AssemblyWriter::printFunction(const Function *F) {
Out << " prologue ";
writeOperand(F->getPrologueData(), true);
}
+ if (F->hasPersonalityFn()) {
+ Out << " personality ";
+ writeOperand(F->getPersonalityFn(), /*PrintType=*/true);
+ }
SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
F->getAllMetadata(MDs);
@@ -2789,8 +2790,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
} else if (const LandingPadInst *LPI = dyn_cast<LandingPadInst>(&I)) {
Out << ' ';
TypePrinter.print(I.getType(), Out);
- Out << " personality ";
- writeOperand(I.getOperand(0), true); Out << '\n';
+ if (LPI->isCleanup() || LPI->getNumClauses() != 0)
+ Out << '\n';
if (LPI->isCleanup())
Out << " cleanup";
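
Several of the AsmWriter hunks are pure modernization: explicit `const_iterator` loops over `global_begin()/global_end()` and friends become range-based `for` over the module's accessor ranges such as `globals()`, `aliases()`, and `named_metadata()`. The shape of that rewrite, shown on a stand-in container:

#include <iostream>
#include <string>
#include <vector>

struct Global { std::string Name; };

int main() {
  std::vector<Global> Globals = {{""}, {"x"}, {""}};

  // Before: iterator-pair loop.
  for (std::vector<Global>::const_iterator I = Globals.begin(),
                                           E = Globals.end();
       I != E; ++I)
    if (I->Name.empty())
      std::cout << "unnamed\n";

  // After: range-based for with a const reference, as in processModule().
  for (const Global &G : Globals)
    if (G.Name.empty())
      std::cout << "unnamed\n";
}
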
diff --git a/lib/IR/AttributeImpl.h b/lib/IR/AttributeImpl.h
index dbd7d63a892b..8159dcefb5c3 100644
--- a/lib/IR/AttributeImpl.h
+++ b/lib/IR/AttributeImpl.h
@@ -181,6 +181,9 @@ public:
AttrList[I].Profile(ID);
}
};
+static_assert(
+ AlignOf<AttributeSetNode>::Alignment >= AlignOf<Attribute>::Alignment,
+ "Alignment is insufficient for objects appended to AttributeSetNode");
//===----------------------------------------------------------------------===//
/// \class
@@ -189,9 +192,11 @@ public:
class AttributeSetImpl : public FoldingSetNode {
friend class AttributeSet;
- LLVMContext &Context;
-
+public:
typedef std::pair<unsigned, AttributeSetNode*> IndexAttrPair;
+
+private:
+ LLVMContext &Context;
unsigned NumAttrs; ///< Number of entries in this set.
/// \brief Return a pointer to the IndexAttrPair for the specified slot.
@@ -206,6 +211,7 @@ public:
AttributeSetImpl(LLVMContext &C,
ArrayRef<std::pair<unsigned, AttributeSetNode *> > Attrs)
: Context(C), NumAttrs(Attrs.size()) {
+
#ifndef NDEBUG
if (Attrs.size() >= 2) {
for (const std::pair<unsigned, AttributeSetNode *> *i = Attrs.begin() + 1,
@@ -267,7 +273,11 @@ public:
void dump() const;
};
+static_assert(
+ AlignOf<AttributeSetImpl>::Alignment >=
+ AlignOf<AttributeSetImpl::IndexAttrPair>::Alignment,
+ "Alignment is insufficient for objects appended to AttributeSetImpl");
-} // end llvm namespace
+} // namespace llvm
#endif
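
The two `static_assert`s added above protect a layout trick: `AttributeSetNode` and `AttributeSetImpl` place their element arrays immediately after the object itself, so the object's alignment must be at least as strict as the elements'. A compilable miniature of the invariant; `Header` and `Item` are hypothetical:

#include <cstdint>
#include <new>

struct Item { void *Ptr; uint32_t Kind; };

// Items are placement-allocated directly behind the header, so the header
// must be at least as strictly aligned as an Item.
struct alignas(alignof(Item)) Header { uint32_t NumItems; };

static_assert(alignof(Header) >= alignof(Item),
              "Alignment is insufficient for objects appended to Header");

int main() {
  const unsigned N = 3;
  void *Mem = ::operator new(sizeof(Header) + N * sizeof(Item));
  Header *H = new (Mem) Header{N};
  Item *Items = reinterpret_cast<Item *>(H + 1); // aligned, per the assert
  for (unsigned I = 0; I != N; ++I)
    Items[I] = Item{nullptr, I};
  ::operator delete(Mem);
}
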
diff --git a/lib/IR/Attributes.cpp b/lib/IR/Attributes.cpp
index fef05c8f92d9..c3032f4ffc79 100644
--- a/lib/IR/Attributes.cpp
+++ b/lib/IR/Attributes.cpp
@@ -252,6 +252,8 @@ std::string Attribute::getAsString(bool InAttrGrp) const {
return "sspreq";
if (hasAttribute(Attribute::StackProtectStrong))
return "sspstrong";
+ if (hasAttribute(Attribute::SafeStack))
+ return "safestack";
if (hasAttribute(Attribute::StructRet))
return "sret";
if (hasAttribute(Attribute::SanitizeThread))
@@ -437,6 +439,7 @@ uint64_t AttributeImpl::getAttrMask(Attribute::AttrKind Val) {
case Attribute::NonNull: return 1ULL << 44;
case Attribute::JumpTable: return 1ULL << 45;
case Attribute::Convergent: return 1ULL << 46;
+ case Attribute::SafeStack: return 1ULL << 47;
case Attribute::Dereferenceable:
llvm_unreachable("dereferenceable attribute not supported in raw format");
break;
diff --git a/lib/IR/BasicBlock.cpp b/lib/IR/BasicBlock.cpp
index 70ae3c398423..77cb10d5b6ba 100644
--- a/lib/IR/BasicBlock.cpp
+++ b/lib/IR/BasicBlock.cpp
@@ -362,12 +362,15 @@ BasicBlock *BasicBlock::splitBasicBlock(iterator I, const Twine &BBName) {
BasicBlock *New = BasicBlock::Create(getContext(), BBName,
getParent(), InsertBefore);
+ // Save DebugLoc of split point before invalidating iterator.
+ DebugLoc Loc = I->getDebugLoc();
// Move all of the specified instructions from the original basic block into
// the new basic block.
New->getInstList().splice(New->end(), this->getInstList(), I, end());
// Add a branch instruction to the newly formed basic block.
- BranchInst::Create(New, this);
+ BranchInst *BI = BranchInst::Create(New, this);
+ BI->setDebugLoc(Loc);
// Now we must loop through all of the successors of the New block (which
// _were_ the successors of the 'this' block), and update any PHI nodes in
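
The splitBasicBlock fix copies the split point's DebugLoc before the splice because, once the instructions move, the iterator refers into the new block while the branch being created still belongs to the old one. The same copy-before-splice discipline, shown on std::list, where splice likewise leaves the iterator pointing into the destination list:

#include <iostream>
#include <iterator>
#include <list>

int main() {
  std::list<int> Orig = {10, 20, 30, 40};
  std::list<int> New;

  std::list<int>::iterator I = std::next(Orig.begin(), 2); // split at 30
  int Saved = *I; // copy what we need while I still describes Orig

  // Move [I, end) into New; afterwards I refers to an element of New.
  New.splice(New.end(), Orig, I, Orig.end());

  std::cout << "saved " << Saved << ", Orig has " << Orig.size()
            << " elements, New has " << New.size() << "\n";
}
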
diff --git a/lib/IR/ConstantFold.cpp b/lib/IR/ConstantFold.cpp
index 2efc61242eac..46bb20e0d1b7 100644
--- a/lib/IR/ConstantFold.cpp
+++ b/lib/IR/ConstantFold.cpp
@@ -2163,11 +2163,11 @@ static Constant *ConstantFoldGetElementPtrImpl(Type *PointeeTy, Constant *C,
// Check to see if any array indices are not within the corresponding
// notional array or vector bounds. If so, try to determine if they can be
// factored out into preceding dimensions.
- bool Unknown = false;
SmallVector<Constant *, 8> NewIdxs;
- Type *Ty = C->getType();
- Type *Prev = nullptr;
- for (unsigned i = 0, e = Idxs.size(); i != e;
+ Type *Ty = PointeeTy;
+ Type *Prev = C->getType();
+ bool Unknown = !isa<ConstantInt>(Idxs[0]);
+ for (unsigned i = 1, e = Idxs.size(); i != e;
Prev = Ty, Ty = cast<CompositeType>(Ty)->getTypeAtIndex(Idxs[i]), ++i) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idxs[i])) {
if (isa<ArrayType>(Ty) || isa<VectorType>(Ty))
diff --git a/lib/IR/ConstantFold.h b/lib/IR/ConstantFold.h
index 42a9c6ba908a..715c42958fd0 100644
--- a/lib/IR/ConstantFold.h
+++ b/lib/IR/ConstantFold.h
@@ -55,6 +55,6 @@ namespace llvm {
ArrayRef<Constant *> Idxs);
Constant *ConstantFoldGetElementPtr(Type *Ty, Constant *C, bool inBounds,
ArrayRef<Value *> Idxs);
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/IR/Constants.cpp b/lib/IR/Constants.cpp
index fb83ebbbd878..76c55b6edc9b 100644
--- a/lib/IR/Constants.cpp
+++ b/lib/IR/Constants.cpp
@@ -2389,6 +2389,7 @@ GetElementPtrConstantExpr::GetElementPtrConstantExpr(
IdxList.size() + 1),
SrcElementTy(SrcElementTy) {
Op<0>() = C;
+ Use *OperandList = getOperandList();
for (unsigned i = 0, E = IdxList.size(); i != E; ++i)
OperandList[i+1] = IdxList[i];
}
@@ -2851,6 +2852,7 @@ void ConstantArray::replaceUsesOfWithOnConstant(Value *From, Value *To,
// Keep track of whether all the values in the array are "ToC".
bool AllSame = true;
+ Use *OperandList = getOperandList();
for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) {
Constant *Val = cast<Constant>(O->get());
if (Val == From) {
@@ -2887,6 +2889,7 @@ void ConstantStruct::replaceUsesOfWithOnConstant(Value *From, Value *To,
assert(isa<Constant>(To) && "Cannot make Constant refer to non-constant!");
Constant *ToC = cast<Constant>(To);
+ Use *OperandList = getOperandList();
unsigned OperandToUpdate = U-OperandList;
assert(getOperand(OperandToUpdate) == From && "ReplaceAllUsesWith broken!");
@@ -2955,6 +2958,7 @@ void ConstantVector::replaceUsesOfWithOnConstant(Value *From, Value *To,
}
// Update to the new value.
+ Use *OperandList = getOperandList();
if (Constant *C = getContext().pImpl->VectorConstants.replaceOperandsInPlace(
Values, this, From, ToC, NumUpdated, U - OperandList))
replaceUsesOfWithOnConstantImpl(C);
@@ -2983,6 +2987,7 @@ void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV,
}
// Update to the new value.
+ Use *OperandList = getOperandList();
if (Constant *C = getContext().pImpl->ExprConstants.replaceOperandsInPlace(
NewOps, this, From, To, NumUpdated, U - OperandList))
replaceUsesOfWithOnConstantImpl(C);
diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp
index d476434542ea..23e923d41126 100644
--- a/lib/IR/Core.cpp
+++ b/lib/IR/Core.cpp
@@ -2249,11 +2249,8 @@ LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef B, LLVMValueRef Fn,
}
LLVMValueRef LLVMBuildLandingPad(LLVMBuilderRef B, LLVMTypeRef Ty,
- LLVMValueRef PersFn, unsigned NumClauses,
- const char *Name) {
- return wrap(unwrap(B)->CreateLandingPad(unwrap(Ty),
- cast<Function>(unwrap(PersFn)),
- NumClauses, Name));
+ unsigned NumClauses, const char *Name) {
+ return wrap(unwrap(B)->CreateLandingPad(unwrap(Ty), NumClauses, Name));
}
LLVMValueRef LLVMBuildResume(LLVMBuilderRef B, LLVMValueRef Exn) {
diff --git a/lib/IR/DIBuilder.cpp b/lib/IR/DIBuilder.cpp
index b1925ea5c48f..c41d84492b86 100644
--- a/lib/IR/DIBuilder.cpp
+++ b/lib/IR/DIBuilder.cpp
@@ -55,7 +55,7 @@ public:
return HeaderBuilder().concat("0x" + Twine::utohexstr(Tag));
}
};
-}
+} // namespace
DIBuilder::DIBuilder(Module &m, bool AllowUnresolvedNodes)
: M(m), VMContext(M.getContext()), TempEnumTypes(nullptr),
@@ -327,7 +327,8 @@ DIBuilder::createObjCProperty(StringRef Name, DIFile *File, unsigned LineNumber,
StringRef GetterName, StringRef SetterName,
unsigned PropertyAttributes, DIType *Ty) {
return DIObjCProperty::get(VMContext, Name, File, LineNumber, GetterName,
- SetterName, PropertyAttributes, Ty);
+ SetterName, PropertyAttributes,
+ DITypeRef::get(Ty));
}
DITemplateTypeParameter *
diff --git a/lib/IR/DiagnosticInfo.cpp b/lib/IR/DiagnosticInfo.cpp
index 45be100ae053..5de928965f2c 100644
--- a/lib/IR/DiagnosticInfo.cpp
+++ b/lib/IR/DiagnosticInfo.cpp
@@ -22,9 +22,9 @@
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/Atomic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Regex.h"
+#include <atomic>
#include <string>
using namespace llvm;
@@ -84,11 +84,11 @@ PassRemarksAnalysis(
"the given regular expression"),
cl::Hidden, cl::location(PassRemarksAnalysisOptLoc), cl::ValueRequired,
cl::ZeroOrMore);
-}
+} // namespace
int llvm::getNextAvailablePluginDiagnosticKind() {
- static sys::cas_flag PluginKindID = DK_FirstPluginKind;
- return (int)sys::AtomicIncrement(&PluginKindID);
+ static std::atomic<int> PluginKindID(DK_FirstPluginKind);
+ return ++PluginKindID;
}
DiagnosticInfoInlineAsm::DiagnosticInfoInlineAsm(const Instruction &I,
@@ -170,6 +170,10 @@ bool DiagnosticInfoOptimizationRemarkAnalysis::isEnabled() const {
PassRemarksAnalysisOptLoc.Pattern->match(getPassName());
}
+void DiagnosticInfoMIRParser::print(DiagnosticPrinter &DP) const {
+ DP << Diagnostic;
+}
+
void llvm::emitOptimizationRemark(LLVMContext &Ctx, const char *PassName,
const Function &Fn, const DebugLoc &DLoc,
const Twine &Msg) {
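
With C++11 available, the hand-rolled `sys::AtomicIncrement` over a `sys::cas_flag` becomes a function-local `static std::atomic<int>`, which is initialized thread-safely and hands out unique, increasing IDs under concurrent callers. A minimal equivalent, with 100 standing in for DK_FirstPluginKind:

#include <atomic>
#include <iostream>

static int getNextAvailableKind() {
  static std::atomic<int> KindID(100); // initialized once, thread-safely
  return ++KindID;                     // atomic pre-increment: fresh ID
}

int main() {
  std::cout << getNextAvailableKind() << " " << getNextAvailableKind()
            << "\n"; // 101 102
}
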
diff --git a/lib/IR/DiagnosticPrinter.cpp b/lib/IR/DiagnosticPrinter.cpp
index f25fc20a197b..659ff49d623f 100644
--- a/lib/IR/DiagnosticPrinter.cpp
+++ b/lib/IR/DiagnosticPrinter.cpp
@@ -16,6 +16,7 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/SourceMgr.h"
using namespace llvm;
@@ -105,3 +106,12 @@ DiagnosticPrinter &DiagnosticPrinterRawOStream::operator<<(const Module &M) {
Stream << M.getModuleIdentifier();
return *this;
}
+
+// Other types.
+DiagnosticPrinter &DiagnosticPrinterRawOStream::
+operator<<(const SMDiagnostic &Diag) {
+ // We don't have to print the SMDiagnostic kind, as the diagnostic severity
+ // is printed by the diagnostic handler.
+ Diag.print("", Stream, /*ShowColors=*/true, /*ShowKindLabel=*/false);
+ return *this;
+}
diff --git a/lib/IR/Function.cpp b/lib/IR/Function.cpp
index cf8e3ed571e3..b50ad1262c69 100644
--- a/lib/IR/Function.cpp
+++ b/lib/IR/Function.cpp
@@ -154,10 +154,8 @@ bool Argument::hasNoCaptureAttr() const {
/// it in its containing function.
bool Argument::hasStructRetAttr() const {
if (!getType()->isPointerTy()) return false;
- if (this != getParent()->arg_begin())
- return false; // StructRet param must be first param
return getParent()->getAttributes().
- hasAttribute(1, Attribute::StructRet);
+ hasAttribute(getArgNo()+1, Attribute::StructRet);
}
/// hasReturnedAttr - Return true if this argument has the returned attribute on
@@ -250,8 +248,8 @@ void Function::eraseFromParent() {
Function::Function(FunctionType *Ty, LinkageTypes Linkage, const Twine &name,
Module *ParentModule)
- : GlobalObject(PointerType::getUnqual(Ty), Value::FunctionVal, nullptr, 0,
- Linkage, name),
+ : GlobalObject(PointerType::getUnqual(Ty), Value::FunctionVal,
+ OperandTraits<Function>::op_begin(this), 0, Linkage, name),
Ty(Ty) {
assert(FunctionType::isValidReturnType(getReturnType()) &&
"invalid return type");
@@ -281,6 +279,9 @@ Function::~Function() {
// Remove the function from the on-the-side GC table.
clearGC();
+
+ // FIXME: needed by operator delete
+ setFunctionNumOperands(1);
}
void Function::BuildLazyArguments() const {
@@ -333,6 +334,8 @@ void Function::dropAllReferences() {
// Metadata is stored in a side-table.
clearMetadata();
+
+ setPersonalityFn(nullptr);
}
void Function::addAttribute(unsigned i, Attribute::AttrKind attr) {
@@ -428,6 +431,10 @@ void Function::copyAttributesFrom(const GlobalValue *Src) {
setPrologueData(SrcF->getPrologueData());
else
setPrologueData(nullptr);
+ if (SrcF->hasPersonalityFn())
+ setPersonalityFn(SrcF->getPersonalityFn());
+ else
+ setPersonalityFn(nullptr);
}
/// \brief This does the actual lookup of an intrinsic ID which
@@ -839,6 +846,18 @@ bool Intrinsic::isOverloaded(ID id) {
#undef GET_INTRINSIC_OVERLOAD_TABLE
}
+bool Intrinsic::isLeaf(ID id) {
+ switch (id) {
+ default:
+ return true;
+
+ case Intrinsic::experimental_gc_statepoint:
+ case Intrinsic::experimental_patchpoint_void:
+ case Intrinsic::experimental_patchpoint_i64:
+ return false;
+ }
+}
+
/// This defines the "Intrinsic::getAttributes(ID id)" method.
#define GET_INTRINSIC_ATTRIBUTES
#include "llvm/IR/Intrinsics.gen"
@@ -978,3 +997,22 @@ Optional<uint64_t> Function::getEntryCount() const {
}
return None;
}
+
+void Function::setPersonalityFn(Constant *C) {
+ if (!C) {
+ if (hasPersonalityFn()) {
+      // Note: the operand count is used to compute the offset of the operand,
+      // so the order here matters. Clearing the operand and then clearing the
+      // operand count keeps the computed offset to the operand correct.
+ Op<0>().set(nullptr);
+ setFunctionNumOperands(0);
+ }
+ } else {
+    // Note: the operand count is used to compute the offset of the operand,
+    // so the order here matters. We need to set the operand count to 1 first
+    // so that we get the correct offset to the first operand when we set it.
+ if (!hasPersonalityFn())
+ setFunctionNumOperands(1);
+ Op<0>().set(C);
+ }
+}
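
setPersonalityFn (and GlobalVariable::setInitializer a little further down) must order the operand write against the operand-count update because the operand's address is computed from the count: the operands are co-allocated in front of the User object, and `Op<0>()` only points at the right slot while the count matches the layout. A hypothetical miniature of that scheme, not the real Use machinery:

#include <cassert>
#include <cstdio>
#include <new>

// Toy version of hung-off operands: slots live immediately before the
// object, and slot 0 is found by stepping back NumOperands pointers.
struct Node {
  unsigned NumOperands = 0;
  void **op0() { return reinterpret_cast<void **>(this) - NumOperands; }
  void set(void *V) {
    if (V) {
      if (!NumOperands)
        NumOperands = 1; // grow first, so op0() points at the slot...
      *op0() = V;        // ...then store through it.
    } else if (NumOperands) {
      *op0() = nullptr;  // clear through the still-valid offset first...
      NumOperands = 0;   // ...then shrink.
    }
  }
};

int main() {
  // One pointer slot co-allocated in front of the Node.
  alignas(void *) unsigned char Buf[sizeof(void *) + sizeof(Node)] = {};
  Node *N = new (Buf + sizeof(void *)) Node();
  int X = 0;
  N->set(&X);
  assert(*N->op0() == &X);
  N->set(nullptr);
  std::puts("ok");
}
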
diff --git a/lib/IR/GCOV.cpp b/lib/IR/GCOV.cpp
index 6ed589131725..a0a3db42a7b7 100644
--- a/lib/IR/GCOV.cpp
+++ b/lib/IR/GCOV.cpp
@@ -496,7 +496,7 @@ public:
OS << format("%5u:", LineNum) << Line << "\n";
}
};
-}
+} // namespace
/// Convert a path to a gcov filename. If PreservePaths is true, this
/// translates "/" to "#", ".." to "^", and drops ".", to match gcov.
diff --git a/lib/IR/Globals.cpp b/lib/IR/Globals.cpp
index 1028e33eae64..79a458c26f77 100644
--- a/lib/IR/Globals.cpp
+++ b/lib/IR/Globals.cpp
@@ -214,14 +214,20 @@ void GlobalVariable::replaceUsesOfWithOnConstant(Value *From, Value *To,
void GlobalVariable::setInitializer(Constant *InitVal) {
if (!InitVal) {
if (hasInitializer()) {
+      // Note: the operand count is used to compute the offset of the operand,
+      // so the order here matters. Clearing the operand and then clearing the
+      // operand count keeps the computed offset to the operand correct.
Op<0>().set(nullptr);
- NumOperands = 0;
+ setGlobalVariableNumOperands(0);
}
} else {
assert(InitVal->getType() == getType()->getElementType() &&
"Initializer type must match GlobalVariable type");
+    // Note: the operand count is used to compute the offset of the operand,
+    // so the order here matters. We need to set the operand count to 1 first
+    // so that we get the correct offset to the first operand when we set it.
if (!hasInitializer())
- NumOperands = 1;
+ setGlobalVariableNumOperands(1);
Op<0>().set(InitVal);
}
}
diff --git a/lib/IR/IRBuilder.cpp b/lib/IR/IRBuilder.cpp
index 335cf363c367..bddb278dee79 100644
--- a/lib/IR/IRBuilder.cpp
+++ b/lib/IR/IRBuilder.cpp
@@ -25,13 +25,15 @@ using namespace llvm;
/// specified. If Name is specified, it is the name of the global variable
/// created.
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
- const Twine &Name) {
+ const Twine &Name,
+ unsigned AddressSpace) {
Constant *StrConstant = ConstantDataArray::getString(Context, Str);
Module &M = *BB->getParent()->getParent();
GlobalVariable *GV = new GlobalVariable(M, StrConstant->getType(),
true, GlobalValue::PrivateLinkage,
- StrConstant);
- GV->setName(Name);
+ StrConstant, Name, nullptr,
+ GlobalVariable::NotThreadLocal,
+ AddressSpace);
GV->setUnnamedAddr(true);
return GV;
}
diff --git a/lib/IR/IRPrintingPasses.cpp b/lib/IR/IRPrintingPasses.cpp
index c1ac336c1fbf..03e7d55383b7 100644
--- a/lib/IR/IRPrintingPasses.cpp
+++ b/lib/IR/IRPrintingPasses.cpp
@@ -103,7 +103,7 @@ public:
}
};
-}
+} // namespace
char PrintModulePassWrapper::ID = 0;
INITIALIZE_PASS(PrintModulePassWrapper, "print-module",
diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp
index 45bb296602ca..af426387be79 100644
--- a/lib/IR/Instruction.cpp
+++ b/lib/IR/Instruction.cpp
@@ -26,9 +26,9 @@ Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
// If requested, insert this instruction into a basic block...
if (InsertBefore) {
- assert(InsertBefore->getParent() &&
- "Instruction to insert before is not in a basic block!");
- InsertBefore->getParent()->getInstList().insert(InsertBefore, this);
+ BasicBlock *BB = InsertBefore->getParent();
+ assert(BB && "Instruction to insert before is not in a basic block!");
+ BB->getInstList().insert(InsertBefore, this);
}
}
diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp
index 1478bffe7c35..d45b51105361 100644
--- a/lib/IR/Instructions.cpp
+++ b/lib/IR/Instructions.cpp
@@ -85,30 +85,14 @@ const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
//===----------------------------------------------------------------------===//
PHINode::PHINode(const PHINode &PN)
- : Instruction(PN.getType(), Instruction::PHI,
- allocHungoffUses(PN.getNumOperands()), PN.getNumOperands()),
- ReservedSpace(PN.getNumOperands()) {
+ : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
+ ReservedSpace(PN.getNumOperands()) {
+ allocHungoffUses(PN.getNumOperands());
std::copy(PN.op_begin(), PN.op_end(), op_begin());
std::copy(PN.block_begin(), PN.block_end(), block_begin());
SubclassOptionalData = PN.SubclassOptionalData;
}
-PHINode::~PHINode() {
- dropHungoffUses();
-}
-
-Use *PHINode::allocHungoffUses(unsigned N) const {
- // Allocate the array of Uses of the incoming values, followed by a pointer
- // (with bottom bit set) to the User, followed by the array of pointers to
- // the incoming basic blocks.
- size_t size = N * sizeof(Use) + sizeof(Use::UserRef)
- + N * sizeof(BasicBlock*);
- Use *Begin = static_cast<Use*>(::operator new(size));
- Use *End = Begin + N;
- (void) new(End) Use::UserRef(const_cast<PHINode*>(this), 1);
- return Use::initTags(Begin, End);
-}
-
// removeIncomingValue - Remove an incoming value. This is useful if a
// predecessor basic block is deleted.
Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
@@ -124,7 +108,7 @@ Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
// Nuke the last value.
Op<-1>().set(nullptr);
- --NumOperands;
+ setNumHungOffUseOperands(getNumOperands() - 1);
// If the PHI node is dead, because it has zero entries, nuke it now.
if (getNumOperands() == 0 && DeletePHIIfEmpty) {
@@ -144,16 +128,8 @@ void PHINode::growOperands() {
unsigned NumOps = e + e / 2;
if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
- Use *OldOps = op_begin();
- BasicBlock **OldBlocks = block_begin();
-
ReservedSpace = NumOps;
- OperandList = allocHungoffUses(ReservedSpace);
-
- std::copy(OldOps, OldOps + e, op_begin());
- std::copy(OldBlocks, OldBlocks + e, block_begin());
-
- Use::zap(OldOps, OldOps + e, true);
+ growHungoffUses(ReservedSpace, /* IsPhi */ true);
}
/// hasConstantValue - If the specified PHI node always merges together the same
@@ -177,57 +153,47 @@ Value *PHINode::hasConstantValue() const {
// LandingPadInst Implementation
//===----------------------------------------------------------------------===//
-LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn,
- unsigned NumReservedValues, const Twine &NameStr,
- Instruction *InsertBefore)
- : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
- init(PersonalityFn, 1 + NumReservedValues, NameStr);
+LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+ const Twine &NameStr, Instruction *InsertBefore)
+ : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
+ init(NumReservedValues, NameStr);
}
-LandingPadInst::LandingPadInst(Type *RetTy, Value *PersonalityFn,
- unsigned NumReservedValues, const Twine &NameStr,
- BasicBlock *InsertAtEnd)
- : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
- init(PersonalityFn, 1 + NumReservedValues, NameStr);
+LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+ const Twine &NameStr, BasicBlock *InsertAtEnd)
+ : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
+ init(NumReservedValues, NameStr);
}
LandingPadInst::LandingPadInst(const LandingPadInst &LP)
- : Instruction(LP.getType(), Instruction::LandingPad,
- allocHungoffUses(LP.getNumOperands()), LP.getNumOperands()),
- ReservedSpace(LP.getNumOperands()) {
- Use *OL = OperandList, *InOL = LP.OperandList;
+ : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
+ LP.getNumOperands()),
+ ReservedSpace(LP.getNumOperands()) {
+ allocHungoffUses(LP.getNumOperands());
+ Use *OL = getOperandList();
+ const Use *InOL = LP.getOperandList();
for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
OL[I] = InOL[I];
setCleanup(LP.isCleanup());
}
-LandingPadInst::~LandingPadInst() {
- dropHungoffUses();
-}
-
-LandingPadInst *LandingPadInst::Create(Type *RetTy, Value *PersonalityFn,
- unsigned NumReservedClauses,
+LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
const Twine &NameStr,
Instruction *InsertBefore) {
- return new LandingPadInst(RetTy, PersonalityFn, NumReservedClauses, NameStr,
- InsertBefore);
+ return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}
-LandingPadInst *LandingPadInst::Create(Type *RetTy, Value *PersonalityFn,
- unsigned NumReservedClauses,
+LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
const Twine &NameStr,
BasicBlock *InsertAtEnd) {
- return new LandingPadInst(RetTy, PersonalityFn, NumReservedClauses, NameStr,
- InsertAtEnd);
+ return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}
-void LandingPadInst::init(Value *PersFn, unsigned NumReservedValues,
- const Twine &NameStr) {
+void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
ReservedSpace = NumReservedValues;
- NumOperands = 1;
- OperandList = allocHungoffUses(ReservedSpace);
- Op<0>() = PersFn;
+ setNumHungOffUseOperands(0);
+ allocHungoffUses(ReservedSpace);
setName(NameStr);
setCleanup(false);
}
@@ -237,23 +203,16 @@ void LandingPadInst::init(Value *PersFn, unsigned NumReservedValues,
void LandingPadInst::growOperands(unsigned Size) {
unsigned e = getNumOperands();
if (ReservedSpace >= e + Size) return;
- ReservedSpace = (e + Size / 2) * 2;
-
- Use *NewOps = allocHungoffUses(ReservedSpace);
- Use *OldOps = OperandList;
- for (unsigned i = 0; i != e; ++i)
- NewOps[i] = OldOps[i];
-
- OperandList = NewOps;
- Use::zap(OldOps, OldOps + e, true);
+ ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
+ growHungoffUses(ReservedSpace);
}
void LandingPadInst::addClause(Constant *Val) {
unsigned OpNo = getNumOperands();
growOperands(1);
assert(OpNo < ReservedSpace && "Growing didn't work!");
- ++NumOperands;
- OperandList[OpNo] = Val;
+ setNumHungOffUseOperands(getNumOperands() + 1);
+ getOperandList()[OpNo] = Val;
}
//===----------------------------------------------------------------------===//
@@ -266,7 +225,7 @@ CallInst::~CallInst() {
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
const Twine &NameStr) {
this->FTy = FTy;
- assert(NumOperands == Args.size() + 1 && "NumOperands not set up?");
+ assert(getNumOperands() == Args.size() + 1 && "NumOperands not set up?");
Op<-1>() = Func;
#ifndef NDEBUG
@@ -287,7 +246,7 @@ void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
void CallInst::init(Value *Func, const Twine &NameStr) {
FTy =
cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
- assert(NumOperands == 1 && "NumOperands not set up?");
+ assert(getNumOperands() == 1 && "NumOperands not set up?");
Op<-1>() = Func;
assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
@@ -542,7 +501,7 @@ void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
const Twine &NameStr) {
this->FTy = FTy;
- assert(NumOperands == 3 + Args.size() && "NumOperands not set up?");
+ assert(getNumOperands() == 3 + Args.size() && "NumOperands not set up?");
Op<-3>() = Fn;
Op<-2>() = IfNormal;
Op<-1>() = IfException;
@@ -1238,7 +1197,8 @@ FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
const Twine &Name) {
- assert(NumOperands == 1 + IdxList.size() && "NumOperands not initialized?");
+ assert(getNumOperands() == 1 + IdxList.size() &&
+ "NumOperands not initialized?");
Op<0>() = Ptr;
std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1);
setName(Name);
@@ -1551,7 +1511,7 @@ void ShuffleVectorInst::getShuffleMask(Constant *Mask,
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
const Twine &Name) {
- assert(NumOperands == 2 && "NumOperands not initialized?");
+ assert(getNumOperands() == 2 && "NumOperands not initialized?");
// There's no fundamental reason why we require at least one index
// (other than weirdness with &*IdxBegin being invalid; see
@@ -1582,7 +1542,7 @@ InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
//===----------------------------------------------------------------------===//
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
- assert(NumOperands == 1 && "NumOperands not initialized?");
+ assert(getNumOperands() == 1 && "NumOperands not initialized?");
// There's no fundamental reason why we require at least one index.
// But there's no present need to support it.
@@ -3296,8 +3256,8 @@ bool CmpInst::isFalseWhenEqual(unsigned short predicate) {
void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
assert(Value && Default && NumReserved);
ReservedSpace = NumReserved;
- NumOperands = 2;
- OperandList = allocHungoffUses(ReservedSpace);
+ setNumHungOffUseOperands(2);
+ allocHungoffUses(ReservedSpace);
Op<0>() = Value;
Op<1>() = Default;
@@ -3328,8 +3288,9 @@ SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
SwitchInst::SwitchInst(const SwitchInst &SI)
: TerminatorInst(SI.getType(), Instruction::Switch, nullptr, 0) {
init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
- NumOperands = SI.getNumOperands();
- Use *OL = OperandList, *InOL = SI.OperandList;
+ setNumHungOffUseOperands(SI.getNumOperands());
+ Use *OL = getOperandList();
+ const Use *InOL = SI.getOperandList();
for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
OL[i] = InOL[i];
OL[i+1] = InOL[i+1];
@@ -3337,21 +3298,17 @@ SwitchInst::SwitchInst(const SwitchInst &SI)
SubclassOptionalData = SI.SubclassOptionalData;
}
-SwitchInst::~SwitchInst() {
- dropHungoffUses();
-}
-
/// addCase - Add an entry to the switch instruction...
///
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
- unsigned NewCaseIdx = getNumCases();
- unsigned OpNo = NumOperands;
+ unsigned NewCaseIdx = getNumCases();
+ unsigned OpNo = getNumOperands();
if (OpNo+2 > ReservedSpace)
growOperands(); // Get more space!
// Initialize some new operands.
assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
- NumOperands = OpNo+2;
+ setNumHungOffUseOperands(OpNo+2);
CaseIt Case(this, NewCaseIdx);
Case.setValue(OnVal);
Case.setSuccessor(Dest);
@@ -3365,7 +3322,7 @@ void SwitchInst::removeCase(CaseIt i) {
assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
unsigned NumOps = getNumOperands();
- Use *OL = OperandList;
+ Use *OL = getOperandList();
// Overwrite this case with the end of the list.
if (2 + (idx + 1) * 2 != NumOps) {
@@ -3376,7 +3333,7 @@ void SwitchInst::removeCase(CaseIt i) {
// Nuke the last value.
OL[NumOps-2].set(nullptr);
OL[NumOps-2+1].set(nullptr);
- NumOperands = NumOps-2;
+ setNumHungOffUseOperands(NumOps-2);
}
/// growOperands - grow operands - This grows the operand list in response
@@ -3387,13 +3344,7 @@ void SwitchInst::growOperands() {
unsigned NumOps = e*3;
ReservedSpace = NumOps;
- Use *NewOps = allocHungoffUses(NumOps);
- Use *OldOps = OperandList;
- for (unsigned i = 0; i != e; ++i) {
- NewOps[i] = OldOps[i];
- }
- OperandList = NewOps;
- Use::zap(OldOps, OldOps + e, true);
+ growHungoffUses(ReservedSpace);
}
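For reference, the operand layout these switch hunks manipulate is a flat list: slot 0 holds the condition, slot 1 the default destination, and each case occupies a (value, successor) pair after that. A standalone model, assuming a std::vector of opaque pointers stands in for the hung-off Use array (real operands are Use objects with use-list bookkeeping):

#include <vector>

struct Sw {
  std::vector<void *> Ops; // [cond, default, val0, dest0, val1, dest1, ...]

  unsigned numCases() const { return (Ops.size() - 2) / 2; }

  void addCase(void *OnVal, void *Dest) {
    Ops.push_back(OnVal);
    Ops.push_back(Dest);
  }

  void removeCase(unsigned Idx) {
    // Overwrite the removed pair with the last pair, then drop the tail;
    // a self-copy when Idx is the last case is harmless.
    unsigned I = 2 + Idx * 2, Last = Ops.size() - 2;
    Ops[I] = Ops[Last];
    Ops[I + 1] = Ops[Last + 1];
    Ops.resize(Last);
  }
};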
@@ -3415,9 +3366,9 @@ void IndirectBrInst::init(Value *Address, unsigned NumDests) {
assert(Address && Address->getType()->isPointerTy() &&
"Address of indirectbr must be a pointer");
ReservedSpace = 1+NumDests;
- NumOperands = 1;
- OperandList = allocHungoffUses(ReservedSpace);
-
+ setNumHungOffUseOperands(1);
+ allocHungoffUses(ReservedSpace);
+
Op<0>() = Address;
}
@@ -3430,12 +3381,7 @@ void IndirectBrInst::growOperands() {
unsigned NumOps = e*2;
ReservedSpace = NumOps;
- Use *NewOps = allocHungoffUses(NumOps);
- Use *OldOps = OperandList;
- for (unsigned i = 0; i != e; ++i)
- NewOps[i] = OldOps[i];
- OperandList = NewOps;
- Use::zap(OldOps, OldOps + e, true);
+ growHungoffUses(ReservedSpace);
}
IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
@@ -3453,29 +3399,26 @@ IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
}
IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
- : TerminatorInst(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
- allocHungoffUses(IBI.getNumOperands()),
- IBI.getNumOperands()) {
- Use *OL = OperandList, *InOL = IBI.OperandList;
+ : TerminatorInst(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
+ nullptr, IBI.getNumOperands()) {
+ allocHungoffUses(IBI.getNumOperands());
+ Use *OL = getOperandList();
+ const Use *InOL = IBI.getOperandList();
for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
OL[i] = InOL[i];
SubclassOptionalData = IBI.SubclassOptionalData;
}
-IndirectBrInst::~IndirectBrInst() {
- dropHungoffUses();
-}
-
/// addDestination - Add a destination.
///
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
- unsigned OpNo = NumOperands;
+ unsigned OpNo = getNumOperands();
if (OpNo+1 > ReservedSpace)
growOperands(); // Get more space!
// Initialize some new operands.
assert(OpNo < ReservedSpace && "Growing didn't work!");
- NumOperands = OpNo+1;
- OperandList[OpNo] = DestBB;
+ setNumHungOffUseOperands(OpNo+1);
+ getOperandList()[OpNo] = DestBB;
}
/// removeDestination - This method removes the specified successor from the
@@ -3484,14 +3427,14 @@ void IndirectBrInst::removeDestination(unsigned idx) {
assert(idx < getNumOperands()-1 && "Successor index out of range!");
unsigned NumOps = getNumOperands();
- Use *OL = OperandList;
+ Use *OL = getOperandList();
// Replace this value with the last one.
OL[idx+1] = OL[NumOps-1];
// Nuke the last value.
OL[NumOps-1].set(nullptr);
- NumOperands = NumOps-1;
+ setNumHungOffUseOperands(NumOps-1);
}
BasicBlock *IndirectBrInst::getSuccessorV(unsigned idx) const {
diff --git a/lib/IR/LLVMContext.cpp b/lib/IR/LLVMContext.cpp
index 7bcd829f9f5e..6d799e4b9650 100644
--- a/lib/IR/LLVMContext.cpp
+++ b/lib/IR/LLVMContext.cpp
@@ -199,6 +199,20 @@ static bool isDiagnosticEnabled(const DiagnosticInfo &DI) {
return true;
}
+static const char *getDiagnosticMessagePrefix(DiagnosticSeverity Severity) {
+ switch (Severity) {
+ case DS_Error:
+ return "error";
+ case DS_Warning:
+ return "warning";
+ case DS_Remark:
+ return "remark";
+ case DS_Note:
+ return "note";
+ }
+ llvm_unreachable("Unknown DiagnosticSeverity");
+}
+
void LLVMContext::diagnose(const DiagnosticInfo &DI) {
// If there is a report handler, use it.
if (pImpl->DiagnosticHandler) {
@@ -211,25 +225,12 @@ void LLVMContext::diagnose(const DiagnosticInfo &DI) {
return;
// Otherwise, print the message with a prefix based on the severity.
- std::string MsgStorage;
- raw_string_ostream Stream(MsgStorage);
- DiagnosticPrinterRawOStream DP(Stream);
+ DiagnosticPrinterRawOStream DP(errs());
+ errs() << getDiagnosticMessagePrefix(DI.getSeverity()) << ": ";
DI.print(DP);
- Stream.flush();
- switch (DI.getSeverity()) {
- case DS_Error:
- errs() << "error: " << MsgStorage << "\n";
+ errs() << "\n";
+ if (DI.getSeverity() == DS_Error)
exit(1);
- case DS_Warning:
- errs() << "warning: " << MsgStorage << "\n";
- break;
- case DS_Remark:
- errs() << "remark: " << MsgStorage << "\n";
- break;
- case DS_Note:
- errs() << "note: " << MsgStorage << "\n";
- break;
- }
}
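The control flow after this hunk is simple enough to sketch standalone. The model below assumes std::ostream in place of raw_ostream and a bare enum for DiagnosticSeverity; it keeps the two behavioral points of the rewrite, namely that the prefix comes from a single switch and that an error diagnostic still terminates the process:

#include <cstdlib>
#include <iostream>

enum Severity { Error, Warning, Remark, Note };

const char *prefix(Severity S) {
  switch (S) {
  case Error:   return "error";
  case Warning: return "warning";
  case Remark:  return "remark";
  case Note:    return "note";
  }
  return "unknown"; // unreachable for valid Severity values
}

void diagnose(Severity S, const char *Msg) {
  // Print straight to the error stream instead of staging the message in a
  // temporary string, as the old code did.
  std::cerr << prefix(S) << ": " << Msg << "\n";
  if (S == Error)
    std::exit(1);
}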
void LLVMContext::emitError(unsigned LocCookie, const Twine &ErrorStr) {
diff --git a/lib/IR/LLVMContextImpl.cpp b/lib/IR/LLVMContextImpl.cpp
index 1e2080770fcd..d3d2fcd577d4 100644
--- a/lib/IR/LLVMContextImpl.cpp
+++ b/lib/IR/LLVMContextImpl.cpp
@@ -65,7 +65,7 @@ struct DropFirst {
P.first->dropAllReferences();
}
};
-}
+} // namespace
LLVMContextImpl::~LLVMContextImpl() {
// NOTE: We need to delete the contents of OwnedModules, but Module's dtor
@@ -199,7 +199,7 @@ namespace llvm {
/// does not cause MDOperand to be transparent. In particular, a bare pointer
/// doesn't get hashed before it's combined, whereas \a MDOperand would.
static const Metadata *get_hashable_data(const MDOperand &X) { return X.get(); }
-}
+} // namespace llvm
unsigned MDNodeOpsKey::calculateHash(MDNode *N, unsigned Offset) {
unsigned Hash = hash_combine_range(N->op_begin() + Offset, N->op_end());
diff --git a/lib/IR/LLVMContextImpl.h b/lib/IR/LLVMContextImpl.h
index 3a573362b411..41a898b42a75 100644
--- a/lib/IR/LLVMContextImpl.h
+++ b/lib/IR/LLVMContextImpl.h
@@ -1025,6 +1025,6 @@ public:
void dropTriviallyDeadConstantArrays();
};
-}
+} // namespace llvm
#endif
diff --git a/lib/IR/LegacyPassManager.cpp b/lib/IR/LegacyPassManager.cpp
index 27d98a279fe2..881d7802580e 100644
--- a/lib/IR/LegacyPassManager.cpp
+++ b/lib/IR/LegacyPassManager.cpp
@@ -275,8 +275,8 @@ public:
void FunctionPassManagerImpl::anchor() {}
char FunctionPassManagerImpl::ID = 0;
-} // End of legacy namespace
-} // End of llvm namespace
+} // namespace legacy
+} // namespace llvm
namespace {
//===----------------------------------------------------------------------===//
@@ -439,8 +439,8 @@ public:
void PassManagerImpl::anchor() {}
char PassManagerImpl::ID = 0;
-} // End of legacy namespace
-} // End of llvm namespace
+} // namespace legacy
+} // namespace llvm
namespace {
@@ -486,7 +486,7 @@ public:
}
};
-} // End of anon namespace
+} // namespace
static TimingInfo *TheTimeInfo;
diff --git a/lib/IR/Metadata.cpp b/lib/IR/Metadata.cpp
index 75b4046ef442..1abcf0d18c91 100644
--- a/lib/IR/Metadata.cpp
+++ b/lib/IR/Metadata.cpp
@@ -381,20 +381,35 @@ StringRef MDString::getString() const {
// MDNode implementation.
//
+// Assert that the MDNode types will not be unaligned by the objects
+// prepended to them.
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ static_assert( \
+ llvm::AlignOf<uint64_t>::Alignment >= llvm::AlignOf<CLASS>::Alignment, \
+ "Alignment is insufficient after objects prepended to " #CLASS);
+#include "llvm/IR/Metadata.def"
+
void *MDNode::operator new(size_t Size, unsigned NumOps) {
- void *Ptr = ::operator new(Size + NumOps * sizeof(MDOperand));
+ size_t OpSize = NumOps * sizeof(MDOperand);
+ // uint64_t is the most aligned type we need to support (ensured by the
+ // static_assert above)
+ OpSize = RoundUpToAlignment(OpSize, llvm::alignOf<uint64_t>());
+ void *Ptr = reinterpret_cast<char *>(::operator new(OpSize + Size)) + OpSize;
MDOperand *O = static_cast<MDOperand *>(Ptr);
- for (MDOperand *E = O + NumOps; O != E; ++O)
- (void)new (O) MDOperand;
- return O;
+ for (MDOperand *E = O - NumOps; O != E; --O)
+ (void)new (O - 1) MDOperand;
+ return Ptr;
}
void MDNode::operator delete(void *Mem) {
MDNode *N = static_cast<MDNode *>(Mem);
+ size_t OpSize = N->NumOperands * sizeof(MDOperand);
+ OpSize = RoundUpToAlignment(OpSize, llvm::alignOf<uint64_t>());
+
MDOperand *O = static_cast<MDOperand *>(Mem);
for (MDOperand *E = O - N->NumOperands; O != E; --O)
(O - 1)->~MDOperand();
- ::operator delete(O);
+ ::operator delete(reinterpret_cast<char *>(Mem) - OpSize);
}
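The layout change above co-allocates the operand array before the node and rounds its size up so the node itself stays suitably aligned; the uint64_t bound is what the new static_assert enforces. A standalone sketch of the same layout, assuming a trivial Operand type in place of MDOperand:

#include <cstddef>
#include <cstdint>
#include <new>

struct Operand { void *Ptr = nullptr; };

static size_t roundUp(size_t Value, size_t Align) {
  return (Value + Align - 1) / Align * Align;
}

struct Node {
  unsigned NumOperands = 0; // must match the NumOps passed to allocate()

  static void *allocate(size_t Size, unsigned NumOps) {
    size_t OpSize = roundUp(NumOps * sizeof(Operand), alignof(uint64_t));
    char *Raw = static_cast<char *>(::operator new(OpSize + Size));
    // Construct the operands in the OpSize bytes *before* the node.
    Operand *O = reinterpret_cast<Operand *>(Raw + OpSize);
    for (unsigned I = 0; I != NumOps; ++I)
      new (O - I - 1) Operand;
    return Raw + OpSize; // the node itself lives after its operands
  }

  static void deallocate(Node *N) {
    size_t OpSize =
        roundUp(N->NumOperands * sizeof(Operand), alignof(uint64_t));
    Operand *O = reinterpret_cast<Operand *>(N);
    for (unsigned I = 0; I != N->NumOperands; ++I)
      (O - I - 1)->~Operand();
    ::operator delete(reinterpret_cast<char *>(N) - OpSize);
  }
};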
MDNode::MDNode(LLVMContext &Context, unsigned ID, StorageType Storage,
diff --git a/lib/IR/Operator.cpp b/lib/IR/Operator.cpp
index 77dc680af110..bea1f80d9bf6 100644
--- a/lib/IR/Operator.cpp
+++ b/lib/IR/Operator.cpp
@@ -41,4 +41,4 @@ bool GEPOperator::accumulateConstantOffset(const DataLayout &DL,
}
return true;
}
-}
+} // namespace llvm
diff --git a/lib/IR/Pass.cpp b/lib/IR/Pass.cpp
index df45460a6cca..2fa1e7c85d4f 100644
--- a/lib/IR/Pass.cpp
+++ b/lib/IR/Pass.cpp
@@ -249,7 +249,7 @@ namespace {
CFGOnlyList.push_back(P->getTypeInfo());
}
};
-}
+} // namespace
// setPreservesCFG - This function should be called by the pass, iff it does
// not:
diff --git a/lib/IR/SymbolTableListTraitsImpl.h b/lib/IR/SymbolTableListTraitsImpl.h
index a18f98261abc..f94def7d3d09 100644
--- a/lib/IR/SymbolTableListTraitsImpl.h
+++ b/lib/IR/SymbolTableListTraitsImpl.h
@@ -113,6 +113,6 @@ void SymbolTableListTraits<ValueSubClass,ItemParentClass>
}
}
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/IR/TypeFinder.cpp b/lib/IR/TypeFinder.cpp
index 1d2b808d650e..7accc5bef535 100644
--- a/lib/IR/TypeFinder.cpp
+++ b/lib/IR/TypeFinder.cpp
@@ -50,6 +50,9 @@ void TypeFinder::run(const Module &M, bool onlyNamed) {
if (FI->hasPrologueData())
incorporateValue(FI->getPrologueData());
+ if (FI->hasPersonalityFn())
+ incorporateValue(FI->getPersonalityFn());
+
// First incorporate the arguments.
for (Function::const_arg_iterator AI = FI->arg_begin(),
AE = FI->arg_end(); AI != AE; ++AI)
diff --git a/lib/IR/Use.cpp b/lib/IR/Use.cpp
index cae845d99fe5..fd06fdbb46b1 100644
--- a/lib/IR/Use.cpp
+++ b/lib/IR/Use.cpp
@@ -124,4 +124,4 @@ const Use *Use::getImpliedUser() const {
}
}
-} // End llvm namespace
+} // namespace llvm
diff --git a/lib/IR/User.cpp b/lib/IR/User.cpp
index ee83eacf2b2b..21f48493d3b5 100644
--- a/lib/IR/User.cpp
+++ b/lib/IR/User.cpp
@@ -13,6 +13,7 @@
#include "llvm/IR/Operator.h"
namespace llvm {
+class BasicBlock;
//===----------------------------------------------------------------------===//
// User Class
@@ -39,41 +40,100 @@ void User::replaceUsesOfWith(Value *From, Value *To) {
// User allocHungoffUses Implementation
//===----------------------------------------------------------------------===//
-Use *User::allocHungoffUses(unsigned N) const {
+void User::allocHungoffUses(unsigned N, bool IsPhi) {
+ assert(HasHungOffUses && "alloc must have hung off uses");
+
+ static_assert(AlignOf<Use>::Alignment >= AlignOf<Use::UserRef>::Alignment,
+ "Alignment is insufficient for 'hung-off-uses' pieces");
+ static_assert(AlignOf<Use::UserRef>::Alignment >=
+ AlignOf<BasicBlock *>::Alignment,
+ "Alignment is insufficient for 'hung-off-uses' pieces");
+
// Allocate the array of Uses, followed by a pointer (with bottom bit set) to
// the User.
size_t size = N * sizeof(Use) + sizeof(Use::UserRef);
+ if (IsPhi)
+ size += N * sizeof(BasicBlock *);
Use *Begin = static_cast<Use*>(::operator new(size));
Use *End = Begin + N;
(void) new(End) Use::UserRef(const_cast<User*>(this), 1);
- return Use::initTags(Begin, End);
+ setOperandList(Use::initTags(Begin, End));
+}
+
+void User::growHungoffUses(unsigned NewNumUses, bool IsPhi) {
+ assert(HasHungOffUses && "realloc must have hung off uses");
+
+ unsigned OldNumUses = getNumOperands();
+
+ // We don't support shrinking the number of uses. We wouldn't have enough
+ // space to copy the old uses into the new space.
+ assert(NewNumUses > OldNumUses && "realloc must grow num uses");
+
+ Use *OldOps = getOperandList();
+ allocHungoffUses(NewNumUses, IsPhi);
+ Use *NewOps = getOperandList();
+
+ // Now copy from the old operands list to the new one.
+ std::copy(OldOps, OldOps + OldNumUses, NewOps);
+
+ // If this is a Phi, then we need to copy the BB pointers too.
+ if (IsPhi) {
+ auto *OldPtr =
+ reinterpret_cast<char *>(OldOps + OldNumUses) + sizeof(Use::UserRef);
+ auto *NewPtr =
+ reinterpret_cast<char *>(NewOps + NewNumUses) + sizeof(Use::UserRef);
+ std::copy(OldPtr, OldPtr + (OldNumUses * sizeof(BasicBlock *)), NewPtr);
+ }
+ Use::zap(OldOps, OldOps + OldNumUses, true);
}
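growHungoffUses is now the single place that reallocates a hung-off list: allocate the larger array, copy the live uses (and, for PHIs, the block pointers stored past the UserRef), then zap the old storage. A minimal standalone model, assuming a plain heap array with no UserRef or PHI tail:

#include <algorithm>

struct Use { void *Val = nullptr; };

struct HungOffList {
  Use *Ops = nullptr;
  unsigned NumOps = 0;

  void grow(unsigned NewNum) {
    // Shrinking is unsupported: the old uses must all fit in the new array.
    Use *NewOps = new Use[NewNum]();
    std::copy(Ops, Ops + NumOps, NewOps);
    delete[] Ops; // in LLVM this is Use::zap(), which also unlinks the uses
    Ops = NewOps;
    NumOps = NewNum;
  }
};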
//===----------------------------------------------------------------------===//
// User operator new Implementations
//===----------------------------------------------------------------------===//
-void *User::operator new(size_t s, unsigned Us) {
- void *Storage = ::operator new(s + sizeof(Use) * Us);
+void *User::operator new(size_t Size, unsigned Us) {
+ assert(Us < (1u << NumUserOperandsBits) && "Too many operands");
+ void *Storage = ::operator new(Size + sizeof(Use) * Us);
Use *Start = static_cast<Use*>(Storage);
Use *End = Start + Us;
User *Obj = reinterpret_cast<User*>(End);
- Obj->OperandList = Start;
- Obj->NumOperands = Us;
+ Obj->NumUserOperands = Us;
+ Obj->HasHungOffUses = false;
Use::initTags(Start, End);
return Obj;
}
+void *User::operator new(size_t Size) {
+ // Allocate space for a single Use*
+ void *Storage = ::operator new(Size + sizeof(Use *));
+ Use **HungOffOperandList = static_cast<Use **>(Storage);
+ User *Obj = reinterpret_cast<User *>(HungOffOperandList + 1);
+ Obj->NumUserOperands = 0;
+ Obj->HasHungOffUses = true;
+ *HungOffOperandList = nullptr;
+ return Obj;
+}
+
//===----------------------------------------------------------------------===//
// User operator delete Implementation
//===----------------------------------------------------------------------===//
void User::operator delete(void *Usr) {
- User *Start = static_cast<User*>(Usr);
- Use *Storage = static_cast<Use*>(Usr) - Start->NumOperands;
- // If there were hung-off uses, they will have been freed already and
- // NumOperands reset to 0, so here we just free the User itself.
- ::operator delete(Storage);
+ // Hung off uses use a single Use* before the User, while other subclasses
+ // use a Use[] allocated prior to the user.
+ User *Obj = static_cast<User *>(Usr);
+ if (Obj->HasHungOffUses) {
+ Use **HungOffOperandList = static_cast<Use **>(Usr) - 1;
+ // Drop the hung-off uses.
+ Use::zap(*HungOffOperandList, *HungOffOperandList + Obj->NumUserOperands,
+ /* Delete */ true);
+ ::operator delete(HungOffOperandList);
+ } else {
+ Use *Storage = static_cast<Use *>(Usr) - Obj->NumUserOperands;
+ Use::zap(Storage, Storage + Obj->NumUserOperands,
+ /* Delete */ false);
+ ::operator delete(Storage);
+ }
}
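These two operator new/delete pairs are the heart of the patch: fixed-arity users co-allocate Use[N] immediately before the object, while hung-off users reserve a single Use* slot there instead, and HasHungOffUses tells operator delete which layout it is freeing. A standalone model of both layouts, assuming a POD Use and a new[]-allocated hung-off list; the setHungOffList helper is hypothetical, not LLVM API:

#include <cstddef>
#include <new>

struct Use { void *Val; };

struct Obj {
  unsigned NumOps;
  bool HasHungOff;

  static void *createFixed(size_t Size, unsigned Us) {
    void *Storage = ::operator new(Size + sizeof(Use) * Us);
    Use *Start = static_cast<Use *>(Storage);
    Obj *O = reinterpret_cast<Obj *>(Start + Us); // object sits after Use[Us]
    // Fields are poked before any constructor runs, mirroring
    // User::operator new.
    O->NumOps = Us;
    O->HasHungOff = false;
    return O;
  }

  static void *createHungOff(size_t Size) {
    void *Storage = ::operator new(Size + sizeof(Use *));
    Use **Slot = static_cast<Use **>(Storage); // one pointer before the object
    *Slot = nullptr;
    Obj *O = reinterpret_cast<Obj *>(Slot + 1);
    O->NumOps = 0;
    O->HasHungOff = true;
    return O;
  }

  void setHungOffList(unsigned N) { // hypothetical allocHungoffUses stand-in
    Use **Slot = reinterpret_cast<Use **>(this) - 1;
    delete[] *Slot;
    *Slot = new Use[N]();
    NumOps = N;
  }

  static void destroy(Obj *O) {
    if (O->HasHungOff) {
      Use **Slot = reinterpret_cast<Use **>(O) - 1;
      delete[] *Slot; // LLVM: Use::zap(..., /*Delete=*/true)
      ::operator delete(Slot);
    } else {
      ::operator delete(reinterpret_cast<Use *>(O) - O->NumOps);
    }
  }
};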
//===----------------------------------------------------------------------===//
@@ -84,4 +144,4 @@ Operator::~Operator() {
llvm_unreachable("should never destroy an Operator");
}
-} // End llvm namespace
+} // namespace llvm
diff --git a/lib/IR/Value.cpp b/lib/IR/Value.cpp
index dcf0ad50190f..eb5c2253f4e0 100644
--- a/lib/IR/Value.cpp
+++ b/lib/IR/Value.cpp
@@ -39,6 +39,7 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
// Value Class
//===----------------------------------------------------------------------===//
+const unsigned Value::NumUserOperandsBits;
static inline Type *checkType(Type *Ty) {
assert(Ty && "Value defined with a null type: Error!");
@@ -48,7 +49,7 @@ static inline Type *checkType(Type *Ty) {
Value::Value(Type *ty, unsigned scid)
: VTy(checkType(ty)), UseList(nullptr), SubclassID(scid),
HasValueHandle(0), SubclassOptionalData(0), SubclassData(0),
- NumOperands(0), IsUsedByMD(false), HasName(false) {
+ NumUserOperands(0), IsUsedByMD(false), HasName(false) {
// FIXME: Why isn't this in the subclass gunk??
// Note, we cannot call isa<CallInst> before the CallInst has been
// constructed.
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index 5ed137abd0e5..19b11b45ac32 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -181,11 +181,6 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
/// \brief Track unresolved string-based type references.
SmallDenseMap<const MDString *, const MDNode *, 32> UnresolvedTypeRefs;
- /// \brief The personality function referenced by the LandingPadInsts.
- /// All LandingPadInsts within the same function must use the same
- /// personality function.
- const Value *PersonalityFn;
-
/// \brief Whether we've seen a call to @llvm.frameescape in this function
/// already.
bool SawFrameEscape;
@@ -196,8 +191,7 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
public:
explicit Verifier(raw_ostream &OS)
- : VerifierSupport(OS), Context(nullptr), PersonalityFn(nullptr),
- SawFrameEscape(false) {}
+ : VerifierSupport(OS), Context(nullptr), SawFrameEscape(false) {}
bool verify(const Function &F) {
M = F.getParent();
@@ -231,7 +225,6 @@ public:
// FIXME: We strip const here because the inst visitor strips const.
visit(const_cast<Function &>(F));
InstsInThisBlock.clear();
- PersonalityFn = nullptr;
SawFrameEscape = false;
return !Broken;
@@ -584,7 +577,6 @@ void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
}
void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
- Assert(!GA.getName().empty(), "Alias name cannot be empty!", &GA);
Assert(GlobalAlias::isValidLinkage(GA.getLinkage()),
"Alias should have private, internal, linkonce, weak, linkonce_odr, "
"weak_odr, or external linkage!",
@@ -1086,7 +1078,7 @@ void Verifier::visitDIExpression(const DIExpression &N) {
void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
Assert(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
if (auto *T = N.getRawType())
- Assert(isa<DIType>(T), "invalid type ref", &N, T);
+ Assert(isTypeRef(N, T), "invalid type ref", &N, T);
if (auto *F = N.getRawFile())
Assert(isa<DIFile>(F), "invalid file", &N, F);
}
@@ -1251,6 +1243,7 @@ void Verifier::VerifyAttributeTypes(AttributeSet Attrs, unsigned Idx,
I->getKindAsEnum() == Attribute::StackProtect ||
I->getKindAsEnum() == Attribute::StackProtectReq ||
I->getKindAsEnum() == Attribute::StackProtectStrong ||
+ I->getKindAsEnum() == Attribute::SafeStack ||
I->getKindAsEnum() == Attribute::NoRedZone ||
I->getKindAsEnum() == Attribute::NoImplicitFloat ||
I->getKindAsEnum() == Attribute::Naked ||
@@ -1757,6 +1750,8 @@ void Verifier::visitFunction(const Function &F) {
"invalid linkage type for function declaration", &F);
Assert(MDs.empty(), "function without a body cannot have metadata", &F,
MDs.empty() ? nullptr : MDs.front().second);
+ Assert(!F.hasPersonalityFn(),
+ "Function declaration shouldn't have a personality routine", &F);
} else {
// Verify that this function (which has a body) is not named "llvm.*". It
// is not legal to define intrinsics.
@@ -2795,22 +2790,16 @@ void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
&LPI);
}
+ Function *F = LPI.getParent()->getParent();
+ Assert(F->hasPersonalityFn(),
+ "LandingPadInst needs to be in a function with a personality.", &LPI);
+
// The landingpad instruction must be the first non-PHI instruction in the
// block.
Assert(LPI.getParent()->getLandingPadInst() == &LPI,
"LandingPadInst not the first non-PHI instruction in the block.",
&LPI);
- // The personality functions for all landingpad instructions within the same
- // function should match.
- if (PersonalityFn)
- Assert(LPI.getPersonalityFn() == PersonalityFn,
- "Personality function doesn't match others in function", &LPI);
- PersonalityFn = LPI.getPersonalityFn();
-
- // All operands must be constants.
- Assert(isa<Constant>(PersonalityFn), "Personality function is not constant!",
- &LPI);
for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
Constant *Clause = LPI.getClause(i);
if (LPI.isCatch(i)) {
@@ -3702,7 +3691,7 @@ struct VerifierLegacyPass : public FunctionPass {
AU.setPreservesAll();
}
};
-}
+} // namespace
char VerifierLegacyPass::ID = 0;
INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
diff --git a/lib/IRReader/CMakeLists.txt b/lib/IRReader/CMakeLists.txt
index 2c0e61b65fbe..87ea88039ef3 100644
--- a/lib/IRReader/CMakeLists.txt
+++ b/lib/IRReader/CMakeLists.txt
@@ -3,4 +3,7 @@ add_llvm_library(LLVMIRReader
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/IRReader
+
+ DEPENDS
+ intrinsics_gen
)
diff --git a/lib/IRReader/IRReader.cpp b/lib/IRReader/IRReader.cpp
index 7bc6f076d62d..43fee65db7f5 100644
--- a/lib/IRReader/IRReader.cpp
+++ b/lib/IRReader/IRReader.cpp
@@ -34,14 +34,14 @@ getLazyIRModule(std::unique_ptr<MemoryBuffer> Buffer, SMDiagnostic &Err,
LLVMContext &Context) {
if (isBitcode((const unsigned char *)Buffer->getBufferStart(),
(const unsigned char *)Buffer->getBufferEnd())) {
- ErrorOr<Module *> ModuleOrErr =
+ ErrorOr<std::unique_ptr<Module>> ModuleOrErr =
getLazyBitcodeModule(std::move(Buffer), Context);
if (std::error_code EC = ModuleOrErr.getError()) {
Err = SMDiagnostic(Buffer->getBufferIdentifier(), SourceMgr::DK_Error,
EC.message());
return nullptr;
}
- return std::unique_ptr<Module>(ModuleOrErr.get());
+ return std::move(ModuleOrErr.get());
}
return parseAssembly(Buffer->getMemBufferRef(), Err, Context);
@@ -67,13 +67,14 @@ std::unique_ptr<Module> llvm::parseIR(MemoryBufferRef Buffer, SMDiagnostic &Err,
TimePassesIsEnabled);
if (isBitcode((const unsigned char *)Buffer.getBufferStart(),
(const unsigned char *)Buffer.getBufferEnd())) {
- ErrorOr<Module *> ModuleOrErr = parseBitcodeFile(Buffer, Context);
+ ErrorOr<std::unique_ptr<Module>> ModuleOrErr =
+ parseBitcodeFile(Buffer, Context);
if (std::error_code EC = ModuleOrErr.getError()) {
Err = SMDiagnostic(Buffer.getBufferIdentifier(), SourceMgr::DK_Error,
EC.message());
return nullptr;
}
- return std::unique_ptr<Module>(ModuleOrErr.get());
+ return std::move(ModuleOrErr.get());
}
return parseAssembly(Buffer, Err, Context);
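Callers migrate mechanically to the new ownership-passing signature. A hypothetical caller, assuming the r240225 headers and the parseBitcodeFile signature shown in this diff:

#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

std::unique_ptr<Module> loadBitcode(MemoryBufferRef Buffer, LLVMContext &Ctx) {
  // parseBitcodeFile now yields ErrorOr<std::unique_ptr<Module>>, so the
  // result is moved out instead of being wrapped in a fresh unique_ptr.
  ErrorOr<std::unique_ptr<Module>> MOrErr = parseBitcodeFile(Buffer, Ctx);
  if (std::error_code EC = MOrErr.getError()) {
    errs() << "bitcode error: " << EC.message() << "\n";
    return nullptr;
  }
  return std::move(*MOrErr);
}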
diff --git a/lib/LLVMBuild.txt b/lib/LLVMBuild.txt
index 7e7ebc545979..2edb66ae3ae7 100644
--- a/lib/LLVMBuild.txt
+++ b/lib/LLVMBuild.txt
@@ -16,9 +16,28 @@
;===------------------------------------------------------------------------===;
[common]
-subdirectories = Analysis AsmParser Bitcode CodeGen DebugInfo ExecutionEngine
- LineEditor Linker IR IRReader LTO MC Object Option Passes ProfileData Support
- TableGen Target Transforms
+subdirectories =
+ Analysis
+ AsmParser
+ Bitcode
+ CodeGen
+ DebugInfo
+ ExecutionEngine
+ LibDriver
+ LineEditor
+ Linker
+ IR
+ IRReader
+ LTO
+ MC
+ Object
+ Option
+ Passes
+ ProfileData
+ Support
+ TableGen
+ Target
+ Transforms
[component_0]
type = Group
diff --git a/lib/LTO/LLVMBuild.txt b/lib/LTO/LLVMBuild.txt
index dfd424f3e408..ea79d5e6a83a 100644
--- a/lib/LTO/LLVMBuild.txt
+++ b/lib/LTO/LLVMBuild.txt
@@ -19,4 +19,19 @@
type = Library
name = LTO
parent = Libraries
-required_libraries = Analysis BitReader BitWriter CodeGen Core IPA IPO InstCombine Linker MC ObjCARC Object Scalar Support Target
+required_libraries =
+ Analysis
+ BitReader
+ BitWriter
+ CodeGen
+ Core
+ IPA
+ IPO
+ InstCombine
+ Linker
+ MC
+ ObjCARC
+ Object
+ Scalar
+ Support
+ Target
diff --git a/lib/LTO/LTOModule.cpp b/lib/LTO/LTOModule.cpp
index 5cdbca66a80e..bbb3b6df30cc 100644
--- a/lib/LTO/LTOModule.cpp
+++ b/lib/LTO/LTOModule.cpp
@@ -147,9 +147,10 @@ LTOModule *LTOModule::createInContext(const void *mem, size_t length,
return makeLTOModule(Buffer, options, errMsg, Context);
}
-static Module *parseBitcodeFileImpl(MemoryBufferRef Buffer,
- LLVMContext &Context, bool ShouldBeLazy,
- std::string &ErrMsg) {
+static std::unique_ptr<Module> parseBitcodeFileImpl(MemoryBufferRef Buffer,
+ LLVMContext &Context,
+ bool ShouldBeLazy,
+ std::string &ErrMsg) {
// Find the buffer.
ErrorOr<MemoryBufferRef> MBOrErr =
@@ -168,22 +169,22 @@ static Module *parseBitcodeFileImpl(MemoryBufferRef Buffer,
if (!ShouldBeLazy) {
// Parse the full file.
- ErrorOr<Module *> M =
+ ErrorOr<std::unique_ptr<Module>> M =
parseBitcodeFile(*MBOrErr, Context, DiagnosticHandler);
if (!M)
return nullptr;
- return *M;
+ return std::move(*M);
}
// Parse lazily.
std::unique_ptr<MemoryBuffer> LightweightBuf =
MemoryBuffer::getMemBuffer(*MBOrErr, false);
- ErrorOr<Module *> M = getLazyBitcodeModule(std::move(LightweightBuf), Context,
- DiagnosticHandler,
- true/*ShouldLazyLoadMetadata*/);
+ ErrorOr<std::unique_ptr<Module>> M =
+ getLazyBitcodeModule(std::move(LightweightBuf), Context,
+ DiagnosticHandler, true /*ShouldLazyLoadMetadata*/);
if (!M)
return nullptr;
- return *M;
+ return std::move(*M);
}
LTOModule *LTOModule::makeLTOModule(MemoryBufferRef Buffer,
@@ -197,9 +198,9 @@ LTOModule *LTOModule::makeLTOModule(MemoryBufferRef Buffer,
// If we own a context, we know this is being used only for symbol
// extraction, not linking. Be lazy in that case.
- std::unique_ptr<Module> M(parseBitcodeFileImpl(
+ std::unique_ptr<Module> M = parseBitcodeFileImpl(
Buffer, *Context,
- /* ShouldBeLazy */ static_cast<bool>(OwnedContext), errMsg));
+ /* ShouldBeLazy */ static_cast<bool>(OwnedContext), errMsg);
if (!M)
return nullptr;
@@ -468,6 +469,9 @@ void LTOModule::addDefinedSymbol(const char *Name, const GlobalValue *def,
else
attr |= LTO_SYMBOL_SCOPE_DEFAULT;
+ if (def->hasComdat())
+ attr |= LTO_SYMBOL_COMDAT;
+
auto Iter = _defines.insert(Name).first;
// fill information structure
diff --git a/lib/LibDriver/CMakeLists.txt b/lib/LibDriver/CMakeLists.txt
new file mode 100644
index 000000000000..ab53a6843446
--- /dev/null
+++ b/lib/LibDriver/CMakeLists.txt
@@ -0,0 +1,8 @@
+set(LLVM_TARGET_DEFINITIONS Options.td)
+tablegen(LLVM Options.inc -gen-opt-parser-defs)
+add_public_tablegen_target(LibOptionsTableGen)
+
+add_llvm_library(LLVMLibDriver
+ LibDriver.cpp
+ )
+add_dependencies(LLVMLibDriver LibOptionsTableGen)
diff --git a/lib/Target/R600/MCTargetDesc/LLVMBuild.txt b/lib/LibDriver/LLVMBuild.txt
index 74b8ca09ae12..799dc997c0bb 100644
--- a/lib/Target/R600/MCTargetDesc/LLVMBuild.txt
+++ b/lib/LibDriver/LLVMBuild.txt
@@ -1,4 +1,4 @@
-;===- ./lib/Target/R600/MCTargetDesc/LLVMBuild.txt -------------*- Conf -*--===;
+;===- ./lib/LibDriver/LLVMBuild.txt ----------------------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
@@ -17,7 +17,6 @@
[component_0]
type = Library
-name = R600Desc
-parent = R600
-required_libraries = MC R600AsmPrinter R600Info Support
-add_to_library_groups = R600
+name = LibDriver
+parent = Libraries
+required_libraries = Object Option Support
diff --git a/lib/LibDriver/LibDriver.cpp b/lib/LibDriver/LibDriver.cpp
new file mode 100644
index 000000000000..c9857b0493d6
--- /dev/null
+++ b/lib/LibDriver/LibDriver.cpp
@@ -0,0 +1,157 @@
+//===- LibDriver.cpp - lib.exe-compatible driver --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines an interface to a lib.exe-compatible driver that also understands
+// bitcode files. Used by llvm-lib and lld-link2 /lib.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LibDriver/LibDriver.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Object/ArchiveWriter.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/StringSaver.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+
+enum {
+ OPT_INVALID = 0,
+#define OPTION(_1, _2, ID, _4, _5, _6, _7, _8, _9, _10, _11) OPT_##ID,
+#include "Options.inc"
+#undef OPTION
+};
+
+#define PREFIX(NAME, VALUE) const char *const NAME[] = VALUE;
+#include "Options.inc"
+#undef PREFIX
+
+static const llvm::opt::OptTable::Info infoTable[] = {
+#define OPTION(X1, X2, ID, KIND, GROUP, ALIAS, X6, X7, X8, X9, X10) \
+ { \
+ X1, X2, X9, X10, OPT_##ID, llvm::opt::Option::KIND##Class, X8, X7, \
+ OPT_##GROUP, OPT_##ALIAS, X6 \
+ },
+#include "Options.inc"
+#undef OPTION
+};
+
+class LibOptTable : public llvm::opt::OptTable {
+public:
+ LibOptTable() : OptTable(infoTable, llvm::array_lengthof(infoTable), true) {}
+};
+
+} // namespace
+
+static std::string getOutputPath(llvm::opt::InputArgList *Args) {
+ if (auto *Arg = Args->getLastArg(OPT_out))
+ return Arg->getValue();
+ for (auto *Arg : Args->filtered(OPT_INPUT)) {
+ if (!StringRef(Arg->getValue()).endswith_lower(".obj"))
+ continue;
+ SmallString<128> Val = StringRef(Arg->getValue());
+ llvm::sys::path::replace_extension(Val, ".lib");
+ return Val.str();
+ }
+ llvm_unreachable("internal error");
+}
+
+static std::vector<StringRef> getSearchPaths(llvm::opt::InputArgList *Args,
+ StringSaver &Saver) {
+ std::vector<StringRef> Ret;
+ // Add current directory as first item of the search path.
+ Ret.push_back("");
+
+ // Add /libpath flags.
+ for (auto *Arg : Args->filtered(OPT_libpath))
+ Ret.push_back(Arg->getValue());
+
+ // Add $LIB.
+ Optional<std::string> EnvOpt = sys::Process::GetEnv("LIB");
+ if (!EnvOpt.hasValue())
+ return Ret;
+ StringRef Env = Saver.save(*EnvOpt);
+ while (!Env.empty()) {
+ StringRef Path;
+ std::tie(Path, Env) = Env.split(';');
+ Ret.push_back(Path);
+ }
+ return Ret;
+}
+
+static Optional<std::string> findInputFile(StringRef File,
+ ArrayRef<StringRef> Paths) {
+ for (auto Dir : Paths) {
+ SmallString<128> Path = Dir;
+ sys::path::append(Path, File);
+ if (sys::fs::exists(Path))
+ return Path.str().str();
+ }
+ return Optional<std::string>();
+}
+
+int llvm::libDriverMain(int Argc, const char **Argv) {
+ SmallVector<const char *, 20> NewArgv(Argv, Argv + Argc);
+ BumpPtrAllocator Alloc;
+ BumpPtrStringSaver Saver(Alloc);
+ cl::ExpandResponseFiles(Saver, cl::TokenizeWindowsCommandLine, NewArgv);
+ Argv = &NewArgv[0];
+ Argc = static_cast<int>(NewArgv.size());
+
+ LibOptTable Table;
+ unsigned MissingIndex;
+ unsigned MissingCount;
+ std::unique_ptr<llvm::opt::InputArgList> Args(
+ Table.ParseArgs(&Argv[1], &Argv[Argc], MissingIndex, MissingCount));
+ if (MissingCount) {
+ llvm::errs() << "missing arg value for \""
+ << Args->getArgString(MissingIndex)
+ << "\", expected " << MissingCount
+ << (MissingCount == 1 ? " argument.\n" : " arguments.\n");
+ return 1;
+ }
+ for (auto *Arg : Args->filtered(OPT_UNKNOWN))
+ llvm::errs() << "ignoring unknown argument: " << Arg->getSpelling() << "\n";
+
+ if (Args->filtered_begin(OPT_INPUT) == Args->filtered_end()) {
+ llvm::errs() << "no input files.\n";
+ return 1;
+ }
+
+ std::vector<StringRef> SearchPaths = getSearchPaths(Args.get(), Saver);
+
+ std::vector<llvm::NewArchiveIterator> Members;
+ for (auto *Arg : Args->filtered(OPT_INPUT)) {
+ Optional<std::string> Path = findInputFile(Arg->getValue(), SearchPaths);
+ if (!Path.hasValue()) {
+ llvm::errs() << Arg->getValue() << ": no such file or directory\n";
+ return 1;
+ }
+ Members.emplace_back(Saver.save(*Path),
+ llvm::sys::path::filename(Arg->getValue()));
+ }
+
+ std::pair<StringRef, std::error_code> Result = llvm::writeArchive(
+ getOutputPath(Args.get()), Members, /*WriteSymtab=*/true);
+ if (Result.second) {
+ if (Result.first.empty())
+ Result.first = Argv[0];
+ llvm::errs() << Result.first << ": " << Result.second.message() << "\n";
+ return 1;
+ }
+
+ return 0;
+}
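A hypothetical llvm-lib style entry point is all it takes to consume this driver; the only assumption is the libDriverMain declaration that ships in llvm/LibDriver/LibDriver.h alongside this file:

#include "llvm/LibDriver/LibDriver.h"

// Forward the raw command line; libDriverMain handles response-file
// expansion, option parsing, and archive writing itself.
int main(int argc, const char **argv) {
  return llvm::libDriverMain(argc, argv);
}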
diff --git a/lib/LibDriver/Makefile b/lib/LibDriver/Makefile
new file mode 100644
index 000000000000..1c62eac9093d
--- /dev/null
+++ b/lib/LibDriver/Makefile
@@ -0,0 +1,20 @@
+##===- lib/LibDriver/Makefile ------------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../..
+LIBRARYNAME = LLVMLibDriver
+BUILD_ARCHIVE := 1
+BUILT_SOURCES = Options.inc
+TABLEGEN_INC_FILES_COMMON = 1
+
+include $(LEVEL)/Makefile.common
+
+$(ObjDir)/Options.inc.tmp : Options.td $(LLVM_TBLGEN) $(ObjDir)/.dir
+ $(Echo) "Building lib Driver Option tables with tblgen"
+ $(Verb) $(LLVMTableGen) -gen-opt-parser-defs -o $(call SYSPATH, $@) $<
diff --git a/lib/LibDriver/Options.td b/lib/LibDriver/Options.td
new file mode 100644
index 000000000000..0aa1affbebc9
--- /dev/null
+++ b/lib/LibDriver/Options.td
@@ -0,0 +1,23 @@
+include "llvm/Option/OptParser.td"
+
+// lib.exe accepts options starting with either a dash or a slash.
+
+// Flag that takes no arguments.
+class F<string name> : Flag<["/", "-", "-?"], name>;
+
+// Flag that takes one argument after ":".
+class P<string name, string help> :
+ Joined<["/", "-", "-?"], name#":">, HelpText<help>;
+
+def libpath: P<"libpath", "Object file search path">;
+def out : P<"out", "Path to file to write output">;
+
+//==============================================================================
+// The flags below do nothing. They are defined only for lib.exe compatibility.
+//==============================================================================
+
+class QF<string name> : Joined<["/", "-", "-?"], name#":">;
+
+def ignore : QF<"ignore">;
+def machine: QF<"machine">;
+def nologo : F<"nologo">;
diff --git a/lib/Linker/CMakeLists.txt b/lib/Linker/CMakeLists.txt
index 5a1f31a97ee2..f9d8e0925ae3 100644
--- a/lib/Linker/CMakeLists.txt
+++ b/lib/Linker/CMakeLists.txt
@@ -3,4 +3,7 @@ add_llvm_library(LLVMLinker
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/Linker
+
+ DEPENDS
+ intrinsics_gen
)
diff --git a/lib/Linker/LinkModules.cpp b/lib/Linker/LinkModules.cpp
index 1b7a33168f1d..f80f6bc4ce45 100644
--- a/lib/Linker/LinkModules.cpp
+++ b/lib/Linker/LinkModules.cpp
@@ -99,7 +99,7 @@ private:
bool areTypesIsomorphic(Type *DstTy, Type *SrcTy);
};
-}
+} // namespace
void TypeMapTy::addTypeMapping(Type *DstTy, Type *SrcTy) {
assert(SpeculativeTypes.empty());
@@ -507,7 +507,7 @@ private:
void linkNamedMDNodes();
void stripReplacedSubprograms();
};
-}
+} // namespace
/// The LLVM SymbolTable class autorenames globals that conflict in the symbol
/// table. This is good for all clients except for us. Go through the trouble
@@ -1194,6 +1194,11 @@ bool ModuleLinker::linkFunctionBody(Function &Dst, Function &Src) {
Dst.setPrologueData(MapValue(Src.getPrologueData(), ValueMap, RF_None,
&TypeMap, &ValMaterializer));
+ // Link in the personality function.
+ if (Src.hasPersonalityFn())
+ Dst.setPersonalityFn(MapValue(Src.getPersonalityFn(), ValueMap, RF_None,
+ &TypeMap, &ValMaterializer));
+
// Go through and convert function arguments over, remembering the mapping.
Function::arg_iterator DI = Dst.arg_begin();
for (Argument &Arg : Src.args()) {
@@ -1254,15 +1259,15 @@ bool ModuleLinker::linkGlobalValueBody(GlobalValue &Src) {
/// Insert all of the named MDNodes in Src into the Dest module.
void ModuleLinker::linkNamedMDNodes() {
const NamedMDNode *SrcModFlags = SrcM->getModuleFlagsMetadata();
- for (Module::const_named_metadata_iterator I = SrcM->named_metadata_begin(),
- E = SrcM->named_metadata_end(); I != E; ++I) {
+ for (const NamedMDNode &NMD : SrcM->named_metadata()) {
// Don't link module flags here. Do them separately.
- if (&*I == SrcModFlags) continue;
- NamedMDNode *DestNMD = DstM->getOrInsertNamedMetadata(I->getName());
+ if (&NMD == SrcModFlags)
+ continue;
+ NamedMDNode *DestNMD = DstM->getOrInsertNamedMetadata(NMD.getName());
// Add Src elements into Dest node.
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
- DestNMD->addOperand(MapMetadata(I->getOperand(i), ValueMap, RF_None,
- &TypeMap, &ValMaterializer));
+ for (const MDNode *op : NMD.operands())
+ DestNMD->addOperand(
+ MapMetadata(op, ValueMap, RF_None, &TypeMap, &ValMaterializer));
}
}
@@ -1542,9 +1547,8 @@ bool ModuleLinker::run() {
// Insert all of the globals in src into the DstM module... without linking
// initializers (which could refer to functions not yet mapped over).
- for (Module::global_iterator I = SrcM->global_begin(),
- E = SrcM->global_end(); I != E; ++I)
- if (linkGlobalValueProto(I))
+ for (GlobalVariable &GV : SrcM->globals())
+ if (linkGlobalValueProto(&GV))
return true;
// Link the functions together between the two modules, without doing function
@@ -1552,18 +1556,17 @@ bool ModuleLinker::run() {
// function... We do this so that when we begin processing function bodies,
// all of the global values that may be referenced are available in our
// ValueMap.
- for (Module::iterator I = SrcM->begin(), E = SrcM->end(); I != E; ++I)
- if (linkGlobalValueProto(I))
+ for (Function &F : *SrcM)
+ if (linkGlobalValueProto(&F))
return true;
// If there were any aliases, link them now.
- for (Module::alias_iterator I = SrcM->alias_begin(),
- E = SrcM->alias_end(); I != E; ++I)
- if (linkGlobalValueProto(I))
+ for (GlobalAlias &GA : SrcM->aliases())
+ if (linkGlobalValueProto(&GA))
return true;
- for (unsigned i = 0, e = AppendingVars.size(); i != e; ++i)
- linkAppendingVarInit(AppendingVars[i]);
+ for (const AppendingVarInfo &AppendingVar : AppendingVars)
+ linkAppendingVarInit(AppendingVar);
for (const auto &Entry : DstM->getComdatSymbolTable()) {
const Comdat &C = Entry.getValue();
@@ -1802,7 +1805,9 @@ LLVMBool LLVMLinkModules(LLVMModuleRef Dest, LLVMModuleRef Src,
LLVMBool Result = Linker::LinkModules(
D, unwrap(Src), [&](const DiagnosticInfo &DI) { DI.print(DP); });
- if (OutMessages && Result)
+ if (OutMessages && Result) {
+ Stream.flush();
*OutMessages = strdup(Message.c_str());
+ }
return Result;
}
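The added flush() matters because raw_string_ostream buffers: until it is flushed (str() does this implicitly), the backing std::string may lag behind what was printed. A minimal illustration, assuming only raw_ostream.h:

#include "llvm/Support/raw_ostream.h"
#include <string>

std::string render() {
  std::string Storage;
  llvm::raw_string_ostream OS(Storage);
  OS << "hello";
  // Without the flush, Storage may still be empty here; OS.str() would
  // flush and return it in one step.
  OS.flush();
  return Storage;
}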
diff --git a/lib/MC/ELFObjectWriter.cpp b/lib/MC/ELFObjectWriter.cpp
index 0765937d0ea8..c9df8fcf441c 100644
--- a/lib/MC/ELFObjectWriter.cpp
+++ b/lib/MC/ELFObjectWriter.cpp
@@ -144,6 +144,7 @@ class ELFObjectWriter : public MCObjectWriter {
Renames.clear();
Relocations.clear();
StrTabBuilder.clear();
+ SymtabShndxSectionIndex = 0;
SectionTable.clear();
MCObjectWriter::reset();
}
@@ -231,7 +232,7 @@ class ELFObjectWriter : public MCObjectWriter {
uint32_t GroupSymbolIndex, uint64_t Offset, uint64_t Size,
const MCSectionELF &Section);
};
-}
+} // namespace
void ELFObjectWriter::align(unsigned Alignment) {
uint64_t Padding = OffsetToAlignment(OS.tell(), Alignment);
diff --git a/lib/MC/MCAsmStreamer.cpp b/lib/MC/MCAsmStreamer.cpp
index 0f405ad1193e..9a65a3158972 100644
--- a/lib/MC/MCAsmStreamer.cpp
+++ b/lib/MC/MCAsmStreamer.cpp
@@ -1308,7 +1308,10 @@ void MCAsmStreamer::EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &S
GetCommentOS() << "\n";
}
- InstPrinter->printInst(&Inst, OS, "", STI);
+ if (getTargetStreamer())
+ getTargetStreamer()->prettyPrintAsm(*InstPrinter, OS, Inst, STI);
+ else
+ InstPrinter->printInst(&Inst, OS, "", STI);
EmitEOL();
}
diff --git a/lib/MC/MCAssembler.cpp b/lib/MC/MCAssembler.cpp
index 55f50097744d..34211aa901fa 100644
--- a/lib/MC/MCAssembler.cpp
+++ b/lib/MC/MCAssembler.cpp
@@ -54,8 +54,8 @@ STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");
-}
-}
+} // namespace stats
+} // namespace
// FIXME FIXME FIXME: There are a number of places in this file where we convert
// what is a 64-bit assembler value used for computation into a value in the
@@ -262,26 +262,64 @@ uint64_t llvm::computeBundlePadding(const MCAssembler &Assembler,
/* *** */
-MCFragment::MCFragment() : Kind(FragmentType(~0)) {
+void ilist_node_traits<MCFragment>::deleteNode(MCFragment *V) {
+ V->destroy();
}
-MCFragment::~MCFragment() {
+MCFragment::MCFragment() : Kind(FragmentType(~0)), HasInstructions(false),
+ AlignToBundleEnd(false), BundlePadding(0) {
}
-MCFragment::MCFragment(FragmentType Kind, MCSection *Parent)
- : Kind(Kind), Parent(Parent), Atom(nullptr), Offset(~UINT64_C(0)) {
+MCFragment::~MCFragment() { }
+
+MCFragment::MCFragment(FragmentType Kind, bool HasInstructions,
+ uint8_t BundlePadding, MCSection *Parent)
+ : Kind(Kind), HasInstructions(HasInstructions), AlignToBundleEnd(false),
+ BundlePadding(BundlePadding), Parent(Parent), Atom(nullptr),
+ Offset(~UINT64_C(0)) {
if (Parent)
Parent->getFragmentList().push_back(this);
}
-/* *** */
-
-MCEncodedFragment::~MCEncodedFragment() {
-}
-
-/* *** */
+void MCFragment::destroy() {
+ // First check if we are the sentinel.
+ if (Kind == FragmentType(~0)) {
+ delete this;
+ return;
+ }
-MCEncodedFragmentWithFixups::~MCEncodedFragmentWithFixups() {
+ switch (Kind) {
+ case FT_Align:
+ delete cast<MCAlignFragment>(this);
+ return;
+ case FT_Data:
+ delete cast<MCDataFragment>(this);
+ return;
+ case FT_CompactEncodedInst:
+ delete cast<MCCompactEncodedInstFragment>(this);
+ return;
+ case FT_Fill:
+ delete cast<MCFillFragment>(this);
+ return;
+ case FT_Relaxable:
+ delete cast<MCRelaxableFragment>(this);
+ return;
+ case FT_Org:
+ delete cast<MCOrgFragment>(this);
+ return;
+ case FT_Dwarf:
+ delete cast<MCDwarfLineAddrFragment>(this);
+ return;
+ case FT_DwarfFrame:
+ delete cast<MCDwarfCallFrameFragment>(this);
+ return;
+ case FT_LEB:
+ delete cast<MCLEBFragment>(this);
+ return;
+ case FT_SafeSEH:
+ delete cast<MCSafeSEHFragment>(this);
+ return;
+ }
}
/* *** */
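destroy() is a hand-rolled virtual destructor: fragments live in an ilist and no longer carry vtables (the anchor functions are deleted at the bottom of this file), so deletion dispatches on the kind tag instead. A standalone model with two leaf kinds, omitting the sentinel special case:

struct Frag {
  enum Kind { Data, Fill };
  Kind K;
  explicit Frag(Kind K) : K(K) {}
  void destroy(); // non-virtual "destructor": dispatches on the kind tag
protected:
  ~Frag() = default; // protected, so callers must go through destroy()
};

struct DataFrag : Frag { DataFrag() : Frag(Data) {} };
struct FillFrag : Frag { FillFrag() : Frag(Fill) {} };

void Frag::destroy() {
  switch (K) {
  case Data: delete static_cast<DataFrag *>(this); return;
  case Fill: delete static_cast<FillFrag *>(this); return;
  }
}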
@@ -345,16 +383,6 @@ bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
return true;
}
-void MCAssembler::addLocalUsedInReloc(const MCSymbol &Sym) {
- assert(Sym.isTemporary());
- LocalsUsedInReloc.insert(&Sym);
-}
-
-bool MCAssembler::isLocalUsedInReloc(const MCSymbol &Sym) const {
- assert(Sym.isTemporary());
- return LocalsUsedInReloc.count(&Sym);
-}
-
bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
// Non-temporary labels should always be visible to the linker.
if (!Symbol.isTemporary())
@@ -364,7 +392,7 @@ bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
if (!Symbol.isInSection())
return false;
- if (isLocalUsedInReloc(Symbol))
+ if (Symbol.isUsedInReloc())
return true;
return false;
@@ -464,9 +492,11 @@ uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
const MCFragment &F) const {
switch (F.getKind()) {
case MCFragment::FT_Data:
+ return cast<MCDataFragment>(F).getContents().size();
case MCFragment::FT_Relaxable:
+ return cast<MCRelaxableFragment>(F).getContents().size();
case MCFragment::FT_CompactEncodedInst:
- return cast<MCEncodedFragment>(F).getContents().size();
+ return cast<MCCompactEncodedInstFragment>(F).getContents().size();
case MCFragment::FT_Fill:
return cast<MCFillFragment>(F).getSize();
@@ -572,13 +602,6 @@ void MCAsmLayout::layoutFragment(MCFragment *F) {
}
}
-/// \brief Write the contents of a fragment to the given object writer. Expects
-/// a MCEncodedFragment.
-static void writeFragmentContents(const MCFragment &F, MCObjectWriter *OW) {
- const MCEncodedFragment &EF = cast<MCEncodedFragment>(F);
- OW->writeBytes(EF.getContents());
-}
-
void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) {
bool New = !Symbol.isRegistered();
if (Created)
@@ -681,17 +704,17 @@ static void writeFragment(const MCAssembler &Asm, const MCAsmLayout &Layout,
case MCFragment::FT_Data:
++stats::EmittedDataFragments;
- writeFragmentContents(F, OW);
+ OW->writeBytes(cast<MCDataFragment>(F).getContents());
break;
case MCFragment::FT_Relaxable:
++stats::EmittedRelaxableFragments;
- writeFragmentContents(F, OW);
+ OW->writeBytes(cast<MCRelaxableFragment>(F).getContents());
break;
case MCFragment::FT_CompactEncodedInst:
++stats::EmittedCompactEncodedInstFragments;
- writeFragmentContents(F, OW);
+ OW->writeBytes(cast<MCCompactEncodedInstFragment>(F).getContents());
break;
case MCFragment::FT_Fill: {
@@ -880,18 +903,29 @@ void MCAssembler::Finish() {
for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
for (MCSection::iterator it2 = it->begin(), ie2 = it->end(); it2 != ie2;
++it2) {
- MCEncodedFragmentWithFixups *F =
- dyn_cast<MCEncodedFragmentWithFixups>(it2);
- if (F) {
- for (MCEncodedFragmentWithFixups::fixup_iterator it3 = F->fixup_begin(),
- ie3 = F->fixup_end(); it3 != ie3; ++it3) {
- MCFixup &Fixup = *it3;
- uint64_t FixedValue;
- bool IsPCRel;
- std::tie(FixedValue, IsPCRel) = handleFixup(Layout, *F, Fixup);
- getBackend().applyFixup(Fixup, F->getContents().data(),
- F->getContents().size(), FixedValue, IsPCRel);
- }
+ MCEncodedFragment *F = dyn_cast<MCEncodedFragment>(it2);
+ // Data and relaxable fragments both have fixups. So only process
+ // those here.
+ // FIXME: Is there a better way to do this? MCEncodedFragmentWithFixups
+ // being templated makes this tricky.
+ if (!F || isa<MCCompactEncodedInstFragment>(F))
+ continue;
+ ArrayRef<MCFixup> Fixups;
+ MutableArrayRef<char> Contents;
+ if (auto *FragWithFixups = dyn_cast<MCDataFragment>(F)) {
+ Fixups = FragWithFixups->getFixups();
+ Contents = FragWithFixups->getContents();
+ } else if (auto *FragWithFixups = dyn_cast<MCRelaxableFragment>(F)) {
+ Fixups = FragWithFixups->getFixups();
+ Contents = FragWithFixups->getContents();
+ } else
+ llvm_unreachable("Unknow fragment with fixups!");
+ for (const MCFixup &Fixup : Fixups) {
+ uint64_t FixedValue;
+ bool IsPCRel;
+ std::tie(FixedValue, IsPCRel) = handleFixup(Layout, *F, Fixup);
+ getBackend().applyFixup(Fixup, Contents.data(),
+ Contents.size(), FixedValue, IsPCRel);
}
}
}
@@ -1228,17 +1262,3 @@ void MCAssembler::dump() {
OS << "]>\n";
}
#endif
-
-// anchors for MC*Fragment vtables
-void MCEncodedFragment::anchor() { }
-void MCEncodedFragmentWithFixups::anchor() { }
-void MCDataFragment::anchor() { }
-void MCCompactEncodedInstFragment::anchor() { }
-void MCRelaxableFragment::anchor() { }
-void MCAlignFragment::anchor() { }
-void MCFillFragment::anchor() { }
-void MCOrgFragment::anchor() { }
-void MCLEBFragment::anchor() { }
-void MCSafeSEHFragment::anchor() { }
-void MCDwarfLineAddrFragment::anchor() { }
-void MCDwarfCallFrameFragment::anchor() { }
diff --git a/lib/MC/MCContext.cpp b/lib/MC/MCContext.cpp
index 1e52eedaf188..c601c56f3952 100644
--- a/lib/MC/MCContext.cpp
+++ b/lib/MC/MCContext.cpp
@@ -135,7 +135,7 @@ MCSymbolELF *MCContext::getOrCreateSectionSymbol(const MCSectionELF &Section) {
}
auto NameIter = UsedNames.insert(std::make_pair(Name, true)).first;
- Sym = new (*this) MCSymbolELF(&*NameIter, /*isTemporary*/ false);
+ Sym = new (&*NameIter, *this) MCSymbolELF(&*NameIter, /*isTemporary*/ false);
if (!OldSym)
OldSym = Sym;
@@ -164,25 +164,26 @@ MCSymbol *MCContext::createSymbolImpl(const StringMapEntry<bool> *Name,
if (MOFI) {
switch (MOFI->getObjectFileType()) {
case MCObjectFileInfo::IsCOFF:
- return new (*this) MCSymbolCOFF(Name, IsTemporary);
+ return new (Name, *this) MCSymbolCOFF(Name, IsTemporary);
case MCObjectFileInfo::IsELF:
- return new (*this) MCSymbolELF(Name, IsTemporary);
+ return new (Name, *this) MCSymbolELF(Name, IsTemporary);
case MCObjectFileInfo::IsMachO:
- return new (*this) MCSymbolMachO(Name, IsTemporary);
+ return new (Name, *this) MCSymbolMachO(Name, IsTemporary);
}
}
- return new (*this) MCSymbol(MCSymbol::SymbolKindUnset, Name, IsTemporary);
+ return new (Name, *this) MCSymbol(MCSymbol::SymbolKindUnset, Name,
+ IsTemporary);
}
MCSymbol *MCContext::createSymbol(StringRef Name, bool AlwaysAddSuffix,
- bool IsTemporary) {
- if (IsTemporary && !UseNamesOnTempLabels)
+ bool CanBeUnnamed) {
+ if (CanBeUnnamed && !UseNamesOnTempLabels)
return createSymbolImpl(nullptr, true);
// Determine whether this is a user-written assembler temporary or a normal
// label, if used.
- IsTemporary = false;
- if (AllowTemporaryLabels)
+ bool IsTemporary = CanBeUnnamed;
+ if (AllowTemporaryLabels && !IsTemporary)
IsTemporary = Name.startswith(MAI->getPrivateGlobalPrefix());
SmallString<128> NewName = Name;
@@ -205,10 +206,11 @@ MCSymbol *MCContext::createSymbol(StringRef Name, bool AlwaysAddSuffix,
llvm_unreachable("Infinite loop");
}
-MCSymbol *MCContext::createTempSymbol(const Twine &Name, bool AlwaysAddSuffix) {
+MCSymbol *MCContext::createTempSymbol(const Twine &Name, bool AlwaysAddSuffix,
+ bool CanBeUnnamed) {
SmallString<128> NameSV;
raw_svector_ostream(NameSV) << MAI->getPrivateGlobalPrefix() << Name;
- return createSymbol(NameSV, AlwaysAddSuffix, true);
+ return createSymbol(NameSV, AlwaysAddSuffix, CanBeUnnamed);
}
MCSymbol *MCContext::createLinkerPrivateTempSymbol() {
@@ -217,8 +219,8 @@ MCSymbol *MCContext::createLinkerPrivateTempSymbol() {
return createSymbol(NameSV, true, false);
}
-MCSymbol *MCContext::createTempSymbol() {
- return createTempSymbol("tmp", true);
+MCSymbol *MCContext::createTempSymbol(bool CanBeUnnamed) {
+ return createTempSymbol("tmp", true, CanBeUnnamed);
}
unsigned MCContext::NextInstance(unsigned LocalLabelVal) {
@@ -239,7 +241,7 @@ MCSymbol *MCContext::getOrCreateDirectionalLocalSymbol(unsigned LocalLabelVal,
unsigned Instance) {
MCSymbol *&Sym = LocalSymbols[std::make_pair(LocalLabelVal, Instance)];
if (!Sym)
- Sym = createTempSymbol();
+ Sym = createTempSymbol(false);
return Sym;
}
diff --git a/lib/MC/MCDisassembler/MCExternalSymbolizer.cpp b/lib/MC/MCDisassembler/MCExternalSymbolizer.cpp
index 68948d36d65c..b9aebfc617f9 100644
--- a/lib/MC/MCDisassembler/MCExternalSymbolizer.cpp
+++ b/lib/MC/MCDisassembler/MCExternalSymbolizer.cpp
@@ -193,4 +193,4 @@ MCSymbolizer *createMCSymbolizer(StringRef TT, LLVMOpInfoCallback GetOpInfo,
return new MCExternalSymbolizer(*Ctx, std::move(RelInfo), GetOpInfo,
SymbolLookUp, DisInfo);
}
-}
+} // namespace llvm
diff --git a/lib/MC/MCDisassembler/MCRelocationInfo.cpp b/lib/MC/MCDisassembler/MCRelocationInfo.cpp
index ff0c27f5faf3..43005e7c740c 100644
--- a/lib/MC/MCDisassembler/MCRelocationInfo.cpp
+++ b/lib/MC/MCDisassembler/MCRelocationInfo.cpp
@@ -34,6 +34,7 @@ MCRelocationInfo::createExprForCAPIVariantKind(const MCExpr *SubExpr,
return SubExpr;
}
-MCRelocationInfo *llvm::createMCRelocationInfo(StringRef TT, MCContext &Ctx) {
+MCRelocationInfo *llvm::createMCRelocationInfo(const Triple &TT,
+ MCContext &Ctx) {
return new MCRelocationInfo(Ctx);
}
diff --git a/lib/MC/MCDwarf.cpp b/lib/MC/MCDwarf.cpp
index 90f96e2cef54..4ae2bcfab72b 100644
--- a/lib/MC/MCDwarf.cpp
+++ b/lib/MC/MCDwarf.cpp
@@ -1461,7 +1461,7 @@ namespace {
bool IsSignalFrame;
bool IsSimple;
};
-}
+} // namespace
namespace llvm {
template <>
@@ -1488,7 +1488,7 @@ namespace llvm {
LHS.IsSimple == RHS.IsSimple;
}
};
-}
+} // namespace llvm
void MCDwarfFrameEmitter::Emit(MCObjectStreamer &Streamer, MCAsmBackend *MAB,
bool IsEH) {
@@ -1590,18 +1590,17 @@ void MCDwarfFrameEmitter::EncodeAdvanceLoc(MCContext &Context,
OS << uint8_t(dwarf::DW_CFA_advance_loc1);
OS << uint8_t(AddrDelta);
} else if (isUInt<16>(AddrDelta)) {
- // FIXME: check what is the correct behavior on a big endian machine.
OS << uint8_t(dwarf::DW_CFA_advance_loc2);
- OS << uint8_t( AddrDelta & 0xff);
- OS << uint8_t((AddrDelta >> 8) & 0xff);
+ if (Context.getAsmInfo()->isLittleEndian())
+ support::endian::Writer<support::little>(OS).write<uint16_t>(AddrDelta);
+ else
+ support::endian::Writer<support::big>(OS).write<uint16_t>(AddrDelta);
} else {
- // FIXME: check what is the correct behavior on a big endian machine.
assert(isUInt<32>(AddrDelta));
OS << uint8_t(dwarf::DW_CFA_advance_loc4);
- OS << uint8_t( AddrDelta & 0xff);
- OS << uint8_t((AddrDelta >> 8) & 0xff);
- OS << uint8_t((AddrDelta >> 16) & 0xff);
- OS << uint8_t((AddrDelta >> 24) & 0xff);
-
+ if (Context.getAsmInfo()->isLittleEndian())
+ support::endian::Writer<support::little>(OS).write<uint32_t>(AddrDelta);
+ else
+ support::endian::Writer<support::big>(OS).write<uint32_t>(AddrDelta);
}
}
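The replacement consults the endianness recorded in MCAsmInfo instead of hard-coding little-endian byte shuffling, resolving the old FIXMEs. A standalone model of the 16-bit case, assuming std::ostream and a boolean flag in place of the support::endian::Writer template:

#include <cstdint>
#include <ostream>

void writeU16(std::ostream &OS, uint16_t V, bool LittleEndian) {
  char B[2];
  if (LittleEndian) {
    B[0] = char(V & 0xff);        // low byte first
    B[1] = char((V >> 8) & 0xff);
  } else {
    B[0] = char((V >> 8) & 0xff); // high byte first
    B[1] = char(V & 0xff);
  }
  OS.write(B, 2);
}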
diff --git a/lib/MC/MCELFStreamer.cpp b/lib/MC/MCELFStreamer.cpp
index e0f4a2ae16a3..fe9ac21e17fc 100644
--- a/lib/MC/MCELFStreamer.cpp
+++ b/lib/MC/MCELFStreamer.cpp
@@ -45,7 +45,7 @@ MCELFStreamer::~MCELFStreamer() {
}
void MCELFStreamer::mergeFragment(MCDataFragment *DF,
- MCEncodedFragmentWithFixups *EF) {
+ MCDataFragment *EF) {
MCAssembler &Assembler = getAssembler();
if (Assembler.isBundlingEnabled() && Assembler.getRelaxAll()) {
diff --git a/lib/MC/MCNullStreamer.cpp b/lib/MC/MCNullStreamer.cpp
index eb2d91254b34..e0f610bf4ac4 100644
--- a/lib/MC/MCNullStreamer.cpp
+++ b/lib/MC/MCNullStreamer.cpp
@@ -36,7 +36,7 @@ namespace {
void EmitGPRel32Value(const MCExpr *Value) override {}
};
-}
+} // namespace
MCStreamer *llvm::createNullStreamer(MCContext &Context) {
return new MCNullStreamer(Context);
diff --git a/lib/MC/MCObjectFileInfo.cpp b/lib/MC/MCObjectFileInfo.cpp
index 83a08e28a816..aa3d965bbce1 100644
--- a/lib/MC/MCObjectFileInfo.cpp
+++ b/lib/MC/MCObjectFileInfo.cpp
@@ -238,6 +238,9 @@ void MCObjectFileInfo::initMachOMCObjectFileInfo(Triple T) {
StackMapSection = Ctx->getMachOSection("__LLVM_STACKMAPS", "__llvm_stackmaps",
0, SectionKind::getMetadata());
+ FaultMapSection = Ctx->getMachOSection("__LLVM_FAULTMAPS", "__llvm_faultmaps",
+ 0, SectionKind::getMetadata());
+
TLSExtraDataSection = TLSTLVSection;
}
@@ -518,6 +521,9 @@ void MCObjectFileInfo::initELFMCObjectFileInfo(Triple T) {
StackMapSection =
Ctx->getELFSection(".llvm_stackmaps", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
+
+ FaultMapSection =
+ Ctx->getELFSection(".llvm_faultmaps", ELF::SHT_PROGBITS, ELF::SHF_ALLOC);
}
void MCObjectFileInfo::initCOFFMCObjectFileInfo(Triple T) {
@@ -729,7 +735,8 @@ void MCObjectFileInfo::initCOFFMCObjectFileInfo(Triple T) {
SectionKind::getDataRel());
}
-void MCObjectFileInfo::InitMCObjectFileInfo(StringRef T, Reloc::Model relocm,
+void MCObjectFileInfo::InitMCObjectFileInfo(const Triple &TheTriple,
+ Reloc::Model relocm,
CodeModel::Model cm,
MCContext &ctx) {
RelocM = relocm;
@@ -753,7 +760,7 @@ void MCObjectFileInfo::InitMCObjectFileInfo(StringRef T, Reloc::Model relocm,
DwarfAccelNamespaceSection = nullptr; // Used only by selected targets.
DwarfAccelTypesSection = nullptr; // Used only by selected targets.
- TT = Triple(T);
+ TT = TheTriple;
Triple::ArchType Arch = TT.getArch();
// FIXME: Checking for Arch here to filter out bogus triples such as
@@ -777,6 +784,12 @@ void MCObjectFileInfo::InitMCObjectFileInfo(StringRef T, Reloc::Model relocm,
}
}
+void MCObjectFileInfo::InitMCObjectFileInfo(StringRef TT, Reloc::Model RM,
+ CodeModel::Model CM,
+ MCContext &ctx) {
+ InitMCObjectFileInfo(Triple(TT), RM, CM, ctx);
+}
+
MCSection *MCObjectFileInfo::getDwarfTypesSection(uint64_t Hash) const {
return Ctx->getELFSection(".debug_types", ELF::SHT_PROGBITS, ELF::SHF_GROUP,
0, utostr(Hash));
diff --git a/lib/MC/MCObjectStreamer.cpp b/lib/MC/MCObjectStreamer.cpp
index 6de02bcb02d8..a73c171bd1c0 100644
--- a/lib/MC/MCObjectStreamer.cpp
+++ b/lib/MC/MCObjectStreamer.cpp
@@ -54,21 +54,18 @@ void MCObjectStreamer::flushPendingLabels(MCFragment *F, uint64_t FOffset) {
}
}
-bool MCObjectStreamer::emitAbsoluteSymbolDiff(const MCSymbol *Hi,
+void MCObjectStreamer::emitAbsoluteSymbolDiff(const MCSymbol *Hi,
const MCSymbol *Lo,
unsigned Size) {
- // Must both be assigned to the same (valid) fragment.
- if (!Hi->getFragment() || Hi->getFragment() != Lo->getFragment())
- return false;
-
- // Must be a data fragment.
- if (!isa<MCDataFragment>(Hi->getFragment()))
- return false;
+ // If not assigned to the same (valid) fragment, fall back.
+ if (!Hi->getFragment() || Hi->getFragment() != Lo->getFragment()) {
+ MCStreamer::emitAbsoluteSymbolDiff(Hi, Lo, Size);
+ return;
+ }
assert(Hi->getOffset() >= Lo->getOffset() &&
"Expected Hi to be greater than Lo");
EmitIntValue(Hi->getOffset() - Lo->getOffset(), Size);
- return true;
}
void MCObjectStreamer::reset() {
diff --git a/lib/MC/MCParser/AsmParser.cpp b/lib/MC/MCParser/AsmParser.cpp
index 20366dc4e4f5..9c1062f8f588 100644
--- a/lib/MC/MCParser/AsmParser.cpp
+++ b/lib/MC/MCParser/AsmParser.cpp
@@ -484,7 +484,7 @@ private:
void initializeDirectiveKindMap();
};
-}
+} // namespace
namespace llvm {
@@ -1306,8 +1306,10 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
MCSymbol *Sym;
if (LocalLabelVal == -1) {
if (ParsingInlineAsm && SI) {
- StringRef RewrittenLabel = SI->LookupInlineAsmLabel(IDVal, getSourceManager(), IDLoc, true);
- assert(RewrittenLabel.size() && "We should have an internal name here.");
+ StringRef RewrittenLabel =
+ SI->LookupInlineAsmLabel(IDVal, getSourceManager(), IDLoc, true);
+ assert(RewrittenLabel.size() &&
+ "We should have an internal name here.");
Info.AsmRewrites->push_back(AsmRewrite(AOK_Label, IDLoc,
IDVal.size(), RewrittenLabel));
IDVal = RewrittenLabel;
@@ -1942,7 +1944,7 @@ public:
private:
AsmLexer &Lexer;
};
-}
+} // namespace
bool AsmParser::parseMacroArgument(MCAsmMacroArgument &MA, bool Vararg) {
diff --git a/lib/MC/MCParser/CMakeLists.txt b/lib/MC/MCParser/CMakeLists.txt
index 957c94edc53e..99fdd0167993 100644
--- a/lib/MC/MCParser/CMakeLists.txt
+++ b/lib/MC/MCParser/CMakeLists.txt
@@ -10,5 +10,5 @@ add_llvm_library(LLVMMCParser
MCTargetAsmParser.cpp
ADDITIONAL_HEADER_DIRS
- ${LLVM_MAIN_INCLUDE_DIR}/llvm/MCParser
+ ${LLVM_MAIN_INCLUDE_DIR}/llvm/MC/MCParser
)
diff --git a/lib/MC/MCParser/COFFAsmParser.cpp b/lib/MC/MCParser/COFFAsmParser.cpp
index f09bce005d6a..1480f5b4576c 100644
--- a/lib/MC/MCParser/COFFAsmParser.cpp
+++ b/lib/MC/MCParser/COFFAsmParser.cpp
@@ -145,7 +145,7 @@ public:
COFFAsmParser() {}
};
-} // end annonomous namespace.
+} // namespace
static SectionKind computeSectionKind(unsigned Flags) {
if (Flags & COFF::IMAGE_SCN_MEM_EXECUTE)
diff --git a/lib/MC/MCParser/ELFAsmParser.cpp b/lib/MC/MCParser/ELFAsmParser.cpp
index e3585bd27632..e131b238965d 100644
--- a/lib/MC/MCParser/ELFAsmParser.cpp
+++ b/lib/MC/MCParser/ELFAsmParser.cpp
@@ -154,7 +154,7 @@ private:
unsigned parseSunStyleSectionFlags();
};
-}
+} // namespace
/// ParseDirectiveSymbolAttribute
/// ::= { ".local", ".weak", ... } [ identifier ( , identifier )* ]
diff --git a/lib/MC/MCStreamer.cpp b/lib/MC/MCStreamer.cpp
index 011969a3da01..7fbbbd95b560 100644
--- a/lib/MC/MCStreamer.cpp
+++ b/lib/MC/MCStreamer.cpp
@@ -15,6 +15,7 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
@@ -601,6 +602,11 @@ void MCStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
TS->emitAssignment(Symbol, Value);
}
+void MCTargetStreamer::prettyPrintAsm(MCInstPrinter &InstPrinter, raw_ostream &OS,
+ const MCInst &Inst, const MCSubtargetInfo &STI) {
+ InstPrinter.printInst(&Inst, OS, "", STI);
+}
+
void MCStreamer::visitUsedSymbol(const MCSymbol &Sym) {
}
@@ -638,6 +644,25 @@ void MCStreamer::EmitInstruction(const MCInst &Inst,
visitUsedExpr(*Inst.getOperand(i).getExpr());
}
+void MCStreamer::emitAbsoluteSymbolDiff(const MCSymbol *Hi, const MCSymbol *Lo,
+ unsigned Size) {
+ // Get the Hi-Lo expression.
+ const MCExpr *Diff =
+ MCBinaryExpr::createSub(MCSymbolRefExpr::create(Hi, Context),
+ MCSymbolRefExpr::create(Lo, Context), Context);
+
+ const MCAsmInfo *MAI = Context.getAsmInfo();
+ if (!MAI->doesSetDirectiveSuppressesReloc()) {
+ EmitValue(Diff, Size);
+ return;
+ }
+
+ // Otherwise, emit with .set (aka assignment).
+ MCSymbol *SetLabel = Context.createTempSymbol("set", true);
+ EmitAssignment(SetLabel, Diff);
+ EmitSymbolValue(SetLabel, Size);
+}
+
void MCStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {}
void MCStreamer::EmitThumbFunc(MCSymbol *Func) {}
void MCStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {}
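MCObjectStreamer now folds the Hi-Lo difference itself only when both symbols sit in the same data fragment, and otherwise defers to this generic MCStreamer path, which emits either the raw difference expression or a temporary .set assignment when the target's .set directive suppresses relocations. A rough standalone model of the two strategies (printing textual assembly; not the MC API):

    #include <cstdio>
    void emitAbsDiffSketch(const char *Hi, const char *Lo, bool SetSuppressesReloc) {
      if (!SetSuppressesReloc) {
        std::printf(".long %s-%s\n", Hi, Lo);       // direct difference expression
        return;
      }
      std::printf(".set .Lset0, %s-%s\n", Hi, Lo);  // bind the difference first
      std::printf(".long .Lset0\n");                // then reference the temp
    }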
diff --git a/lib/MC/MCSubtargetInfo.cpp b/lib/MC/MCSubtargetInfo.cpp
index 7954a02d83b2..ece775c4f08f 100644
--- a/lib/MC/MCSubtargetInfo.cpp
+++ b/lib/MC/MCSubtargetInfo.cpp
@@ -34,17 +34,12 @@ MCSubtargetInfo::InitCPUSchedModel(StringRef CPU) {
CPUSchedModel = MCSchedModel::GetDefaultSchedModel();
}
-void
-MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef C, StringRef FS,
- ArrayRef<SubtargetFeatureKV> PF,
- ArrayRef<SubtargetFeatureKV> PD,
- const SubtargetInfoKV *ProcSched,
- const MCWriteProcResEntry *WPR,
- const MCWriteLatencyEntry *WL,
- const MCReadAdvanceEntry *RA,
- const InstrStage *IS,
- const unsigned *OC,
- const unsigned *FP) {
+void MCSubtargetInfo::InitMCSubtargetInfo(
+ const Triple &TT, StringRef C, StringRef FS,
+ ArrayRef<SubtargetFeatureKV> PF, ArrayRef<SubtargetFeatureKV> PD,
+ const SubtargetInfoKV *ProcSched, const MCWriteProcResEntry *WPR,
+ const MCWriteLatencyEntry *WL, const MCReadAdvanceEntry *RA,
+ const InstrStage *IS, const unsigned *OC, const unsigned *FP) {
TargetTriple = TT;
CPU = C;
ProcFeatures = PF;
diff --git a/lib/MC/MCSymbol.cpp b/lib/MC/MCSymbol.cpp
index 8d07b7605cea..448422132808 100644
--- a/lib/MC/MCSymbol.cpp
+++ b/lib/MC/MCSymbol.cpp
@@ -9,6 +9,7 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -18,6 +19,24 @@ using namespace llvm;
// Sentinel value for the absolute pseudo section.
MCSection *MCSymbol::AbsolutePseudoSection = reinterpret_cast<MCSection *>(1);
+void *MCSymbol::operator new(size_t s, const StringMapEntry<bool> *Name,
+ MCContext &Ctx) {
+ // We may need more space for a Name to account for alignment. So allocate
+ // space for the storage type and not the name pointer.
+ size_t Size = s + (Name ? sizeof(NameEntryStorageTy) : 0);
+
+ // For safety, ensure that the alignment of a pointer is enough for an
+ // MCSymbol. This also ensures we don't need padding between the name and
+ // symbol.
+ static_assert((unsigned)AlignOf<MCSymbol>::Alignment <=
+ AlignOf<NameEntryStorageTy>::Alignment,
+ "Bad alignment of MCSymbol");
+ void *Storage = Ctx.allocate(Size, alignOf<NameEntryStorageTy>());
+ NameEntryStorageTy *Start = static_cast<NameEntryStorageTy*>(Storage);
+ NameEntryStorageTy *End = Start + (Name ? 1 : 0);
+ return End;
+}
+
void MCSymbol::setVariableValue(const MCExpr *Value) {
assert(!IsUsed && "Cannot set a variable that has already been used.");
assert(Value && "Invalid variable value!");
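The custom operator new above reserves one name-entry slot directly in front of the symbol, so the name can later be recovered at a fixed negative offset without storing a pointer inside MCSymbol; the static_assert guarantees there is no padding between the two. A standalone sketch of the same co-allocation trick (generic types, malloc standing in for the context allocator):

    #include <cstdlib>
    struct NameSlot { const char *Str; };
    struct SymSketch { unsigned Flags; };
    static_assert(alignof(SymSketch) <= alignof(NameSlot),
                  "no padding between name slot and symbol");
    void *allocSym(bool HasName) {
      std::size_t Size = sizeof(SymSketch) + (HasName ? sizeof(NameSlot) : 0);
      void *Storage = std::malloc(Size);              // Ctx.allocate in the hunk
      NameSlot *Start = static_cast<NameSlot *>(Storage);
      // The name, when present, is reachable as ((NameSlot *)Sym) - 1.
      return Start + (HasName ? 1 : 0);               // symbol lives after the slot
    }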
diff --git a/lib/MC/MCSymbolELF.cpp b/lib/MC/MCSymbolELF.cpp
index c3620651f883..6ec70ed3a9fe 100644
--- a/lib/MC/MCSymbolELF.cpp
+++ b/lib/MC/MCSymbolELF.cpp
@@ -36,12 +36,9 @@ enum {
ELF_WeakrefUsedInReloc_Shift = 11,
// One bit.
- ELF_UsedInReloc_Shift = 12,
-
- // One bit.
- ELF_BindingSet_Shift = 13
+ ELF_BindingSet_Shift = 12
};
-}
+} // namespace
void MCSymbolELF::setBinding(unsigned Binding) const {
setIsBindingSet();
@@ -175,15 +172,6 @@ unsigned MCSymbolELF::getOther() const {
return Other << 5;
}
-void MCSymbolELF::setUsedInReloc() const {
- uint32_t OtherFlags = getFlags() & ~(0x1 << ELF_UsedInReloc_Shift);
- setFlags(OtherFlags | (1 << ELF_UsedInReloc_Shift));
-}
-
-bool MCSymbolELF::isUsedInReloc() const {
- return getFlags() & (0x1 << ELF_UsedInReloc_Shift);
-}
-
void MCSymbolELF::setIsWeakrefUsedInReloc() const {
uint32_t OtherFlags = getFlags() & ~(0x1 << ELF_WeakrefUsedInReloc_Shift);
setFlags(OtherFlags | (1 << ELF_WeakrefUsedInReloc_Shift));
@@ -210,4 +198,4 @@ void MCSymbolELF::setIsBindingSet() const {
bool MCSymbolELF::isBindingSet() const {
return getFlags() & (0x1 << ELF_BindingSet_Shift);
}
-}
+} // namespace llvm
diff --git a/lib/MC/MCWin64EH.cpp b/lib/MC/MCWin64EH.cpp
index 1b73b7afb6a0..d8280c7c0141 100644
--- a/lib/MC/MCWin64EH.cpp
+++ b/lib/MC/MCWin64EH.cpp
@@ -247,6 +247,6 @@ void UnwindEmitter::EmitUnwindInfo(MCStreamer &Streamer,
llvm::EmitUnwindInfo(Streamer, info);
}
-}
+} // namespace Win64EH
} // End of namespace llvm
diff --git a/lib/MC/MCWinEH.cpp b/lib/MC/MCWinEH.cpp
index d5d9eadf39a0..9cf2edf2a56c 100644
--- a/lib/MC/MCWinEH.cpp
+++ b/lib/MC/MCWinEH.cpp
@@ -74,6 +74,6 @@ MCSection *UnwindEmitter::getXDataSection(const MCSymbol *Function,
return getUnwindInfoSection(".xdata", XData, Function, Context);
}
-}
-}
+} // namespace WinEH
+} // namespace llvm
diff --git a/lib/MC/WinCOFFObjectWriter.cpp b/lib/MC/WinCOFFObjectWriter.cpp
index 423c7dce45da..5bc1404e83aa 100644
--- a/lib/MC/WinCOFFObjectWriter.cpp
+++ b/lib/MC/WinCOFFObjectWriter.cpp
@@ -191,7 +191,7 @@ public:
void writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
};
-}
+} // namespace
static inline void write_uint32_le(void *Data, uint32_t Value) {
support::endian::write<uint32_t, support::little, support::unaligned>(Data,
@@ -526,13 +526,12 @@ bool WinCOFFObjectWriter::ExportSymbol(const MCSymbol &Symbol,
if (!Symbol.isTemporary())
return true;
- // Absolute temporary labels are never visible.
- if (!Symbol.isInSection())
+ // Temporary variable symbols are invisible.
+ if (Symbol.isVariable())
return false;
- // For now, all non-variable symbols are exported,
- // the linker will sort the rest out for us.
- return !Symbol.isVariable();
+ // Absolute temporary labels are never visible.
+ return !Symbol.isAbsolute();
}
bool WinCOFFObjectWriter::IsPhysicalSection(COFFSection *S) {
diff --git a/lib/MC/WinCOFFStreamer.cpp b/lib/MC/WinCOFFStreamer.cpp
index 41fc8e4681ef..4ecdc3b79a76 100644
--- a/lib/MC/WinCOFFStreamer.cpp
+++ b/lib/MC/WinCOFFStreamer.cpp
@@ -164,7 +164,8 @@ void MCWinCOFFStreamer::EmitCOFFSafeSEH(MCSymbol const *Symbol) {
Triple::x86)
return;
- if (cast<MCSymbolCOFF>(Symbol)->isSafeSEH())
+ const MCSymbolCOFF *CSymbol = cast<MCSymbolCOFF>(Symbol);
+ if (CSymbol->isSafeSEH())
return;
MCSection *SXData = getContext().getObjectFileInfo()->getSXDataSection();
@@ -175,7 +176,12 @@ void MCWinCOFFStreamer::EmitCOFFSafeSEH(MCSymbol const *Symbol) {
new MCSafeSEHFragment(Symbol, SXData);
getAssembler().registerSymbol(*Symbol);
- cast<MCSymbolCOFF>(Symbol)->setIsSafeSEH();
+ CSymbol->setIsSafeSEH();
+
+ // The Microsoft linker requires that the symbol type of a handler be
+ // function. Go ahead and oblige it here.
+ CSymbol->setType(COFF::IMAGE_SYM_DTYPE_FUNCTION
+ << COFF::SCT_COMPLEX_TYPE_SHIFT);
}
void MCWinCOFFStreamer::EmitCOFFSectionIndex(MCSymbol const *Symbol) {
@@ -285,5 +291,5 @@ LLVM_ATTRIBUTE_NORETURN
void MCWinCOFFStreamer::FatalError(const Twine &Msg) const {
getContext().reportFatalError(SMLoc(), Msg);
}
-}
+} // namespace llvm
diff --git a/lib/Makefile b/lib/Makefile
index f75ca584dbe0..9b76126b80a9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,6 +12,6 @@ include $(LEVEL)/Makefile.config
PARALLEL_DIRS := IR AsmParser Bitcode Analysis Transforms CodeGen Target \
ExecutionEngine Linker LTO MC Object Option DebugInfo \
- IRReader LineEditor ProfileData Passes
+ IRReader LineEditor ProfileData Passes LibDriver
include $(LEVEL)/Makefile.common
diff --git a/lib/Object/ArchiveWriter.cpp b/lib/Object/ArchiveWriter.cpp
index 90a736f3baf4..00a56d13bfed 100644
--- a/lib/Object/ArchiveWriter.cpp
+++ b/lib/Object/ArchiveWriter.cpp
@@ -18,6 +18,8 @@
#include "llvm/Object/Archive.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolicFile.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/Errc.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Path.h"
@@ -70,7 +72,7 @@ NewArchiveIterator::getFD(sys::fs::file_status &NewStatus) const {
// Linux cannot open directories with open(2), although
// cygwin and *bsd can.
if (NewStatus.type() == sys::fs::file_type::directory_file)
- return make_error_code(std::errc::is_a_directory);
+ return make_error_code(errc::is_a_directory);
return NewFD;
}
@@ -82,9 +84,7 @@ static void printWithSpacePadding(raw_fd_ostream &OS, T Data, unsigned Size,
OS << Data;
unsigned SizeSoFar = OS.tell() - OldPos;
if (Size > SizeSoFar) {
- unsigned Remaining = Size - SizeSoFar;
- for (unsigned I = 0; I < Remaining; ++I)
- OS << ' ';
+ OS.indent(Size - SizeSoFar);
} else if (Size < SizeSoFar) {
assert(MayTruncate && "Data doesn't fit in Size");
// Some of the data this is used for (like UID) can be larger than the
@@ -93,12 +93,8 @@ static void printWithSpacePadding(raw_fd_ostream &OS, T Data, unsigned Size,
}
}
-static void print32BE(raw_fd_ostream &Out, unsigned Val) {
- // FIXME: Should use Endian.h here.
- for (int I = 3; I >= 0; --I) {
- char V = (Val >> (8 * I)) & 0xff;
- Out << V;
- }
+static void print32BE(raw_ostream &Out, uint32_t Val) {
+ support::endian::Writer<support::big>(Out).write(Val);
}
static void printRestOfMemberHeader(raw_fd_ostream &Out,
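The rewritten print32BE drops the manual shift loop in favor of the endian Writer pulled in by the new EndianStream.h include, resolving the FIXME. The same one-liner generalizes to several fields, e.g. (a sketch against the header shown in the include hunk above):

    #include "llvm/Support/EndianStream.h"
    #include "llvm/Support/raw_ostream.h"
    #include <cstdint>
    void writeBEHeader(llvm::raw_ostream &Out, uint32_t Magic, uint32_t Count) {
      llvm::support::endian::Writer<llvm::support::big> W(Out);
      W.write(Magic);  // 4 bytes, most significant first
      W.write(Count);
    }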
diff --git a/lib/Object/CMakeLists.txt b/lib/Object/CMakeLists.txt
index 17aac8b41211..de809187191b 100644
--- a/lib/Object/CMakeLists.txt
+++ b/lib/Object/CMakeLists.txt
@@ -18,4 +18,7 @@ add_llvm_library(LLVMObject
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/Object
+
+ DEPENDS
+ intrinsics_gen
)
diff --git a/lib/Object/COFFObjectFile.cpp b/lib/Object/COFFObjectFile.cpp
index 1055b987d7ef..e2f559eec72d 100644
--- a/lib/Object/COFFObjectFile.cpp
+++ b/lib/Object/COFFObjectFile.cpp
@@ -991,19 +991,6 @@ symbol_iterator COFFObjectFile::getRelocationSymbol(DataRefImpl Rel) const {
return symbol_iterator(SymbolRef(Ref, this));
}
-section_iterator COFFObjectFile::getRelocationSection(DataRefImpl Rel) const {
- symbol_iterator Sym = getRelocationSymbol(Rel);
- if (Sym == symbol_end())
- return section_end();
- COFFSymbolRef Symb = getCOFFSymbol(*Sym);
- if (!Symb.isSection())
- return section_end();
- section_iterator Res(section_end());
- if (getSymbolSection(Sym->getRawDataRefImpl(),Res))
- return section_end();
- return Res;
-}
-
std::error_code COFFObjectFile::getRelocationType(DataRefImpl Rel,
uint64_t &Res) const {
const coff_relocation* R = toRel(Rel);
diff --git a/lib/Object/COFFYAML.cpp b/lib/Object/COFFYAML.cpp
index 9a24b531da9e..dda4b7f8c87e 100644
--- a/lib/Object/COFFYAML.cpp
+++ b/lib/Object/COFFYAML.cpp
@@ -335,7 +335,7 @@ struct NDLLCharacteristics {
COFF::DLLCharacteristics Characteristics;
};
-}
+} // namespace
void MappingTraits<COFFYAML::Relocation>::mapping(IO &IO,
COFFYAML::Relocation &Rel) {
@@ -497,5 +497,5 @@ void MappingTraits<COFFYAML::Object>::mapping(IO &IO, COFFYAML::Object &Obj) {
IO.mapRequired("symbols", Obj.Symbols);
}
-}
-}
+} // namespace yaml
+} // namespace llvm
diff --git a/lib/Object/ELFYAML.cpp b/lib/Object/ELFYAML.cpp
index 78087d62ada0..50730a99655c 100644
--- a/lib/Object/ELFYAML.cpp
+++ b/lib/Object/ELFYAML.cpp
@@ -44,7 +44,7 @@ ScalarEnumerationTraits<ELFYAML::ELF_EM>::enumeration(IO &IO,
ECase(EM_386)
ECase(EM_68K)
ECase(EM_88K)
- ECase(EM_486)
+ ECase(EM_IAMCU)
ECase(EM_860)
ECase(EM_MIPS)
ECase(EM_S370)
@@ -590,7 +590,7 @@ struct NormalizedOther {
ELFYAML::ELF_STV Visibility;
ELFYAML::ELF_STO Other;
};
-}
+} // namespace
void MappingTraits<ELFYAML::Symbol>::mapping(IO &IO, ELFYAML::Symbol &Symbol) {
IO.mapOptional("Name", Symbol.Name, StringRef());
@@ -723,7 +723,7 @@ struct NormalizedMips64RelType {
ELFYAML::ELF_REL Type3;
ELFYAML::ELF_RSS SpecSym;
};
-}
+} // namespace
void MappingTraits<ELFYAML::Relocation>::mapping(IO &IO,
ELFYAML::Relocation &Rel) {
diff --git a/lib/Object/IRObjectFile.cpp b/lib/Object/IRObjectFile.cpp
index e89cb8ead36d..e90e08d786f1 100644
--- a/lib/Object/IRObjectFile.cpp
+++ b/lib/Object/IRObjectFile.cpp
@@ -45,22 +45,22 @@ IRObjectFile::IRObjectFile(MemoryBufferRef Object, std::unique_ptr<Module> Mod)
if (InlineAsm.empty())
return;
- StringRef Triple = M->getTargetTriple();
+ Triple TT(M->getTargetTriple());
std::string Err;
- const Target *T = TargetRegistry::lookupTarget(Triple, Err);
+ const Target *T = TargetRegistry::lookupTarget(TT.str(), Err);
if (!T)
return;
- std::unique_ptr<MCRegisterInfo> MRI(T->createMCRegInfo(Triple));
+ std::unique_ptr<MCRegisterInfo> MRI(T->createMCRegInfo(TT.str()));
if (!MRI)
return;
- std::unique_ptr<MCAsmInfo> MAI(T->createMCAsmInfo(*MRI, Triple));
+ std::unique_ptr<MCAsmInfo> MAI(T->createMCAsmInfo(*MRI, TT.str()));
if (!MAI)
return;
std::unique_ptr<MCSubtargetInfo> STI(
- T->createMCSubtargetInfo(Triple, "", ""));
+ T->createMCSubtargetInfo(TT.str(), "", ""));
if (!STI)
return;
@@ -70,7 +70,7 @@ IRObjectFile::IRObjectFile(MemoryBufferRef Object, std::unique_ptr<Module> Mod)
MCObjectFileInfo MOFI;
MCContext MCCtx(MAI.get(), MRI.get(), &MOFI);
- MOFI.InitMCObjectFileInfo(Triple, Reloc::Default, CodeModel::Default, MCCtx);
+ MOFI.InitMCObjectFileInfo(TT, Reloc::Default, CodeModel::Default, MCCtx);
std::unique_ptr<RecordStreamer> Streamer(new RecordStreamer(MCCtx));
T->createNullTargetStreamer(*Streamer);
@@ -198,6 +198,9 @@ std::error_code IRObjectFile::printSymbolName(raw_ostream &OS,
return std::error_code();
}
+ if (GV->hasDLLImportStorageClass())
+ OS << "__imp_";
+
if (Mang)
Mang->getNameWithPrefix(OS, GV, false);
else
@@ -301,12 +304,12 @@ llvm::object::IRObjectFile::create(MemoryBufferRef Object,
std::unique_ptr<MemoryBuffer> Buff(
MemoryBuffer::getMemBuffer(BCOrErr.get(), false));
- ErrorOr<Module *> MOrErr =
+ ErrorOr<std::unique_ptr<Module>> MOrErr =
getLazyBitcodeModule(std::move(Buff), Context, nullptr,
/*ShouldLazyLoadMetadata*/ true);
if (std::error_code EC = MOrErr.getError())
return EC;
- std::unique_ptr<Module> M(MOrErr.get());
+ std::unique_ptr<Module> &M = MOrErr.get();
return llvm::make_unique<IRObjectFile>(Object, std::move(M));
}
diff --git a/lib/Object/MachOObjectFile.cpp b/lib/Object/MachOObjectFile.cpp
index d02ca48a7d19..f76dd0d3f7ce 100644
--- a/lib/Object/MachOObjectFile.cpp
+++ b/lib/Object/MachOObjectFile.cpp
@@ -1232,6 +1232,7 @@ bool MachOObjectFile::isValidArch(StringRef ArchFlag) {
.Case("armv5e", true)
.Case("armv6", true)
.Case("armv6m", true)
+ .Case("armv7", true)
.Case("armv7em", true)
.Case("armv7k", true)
.Case("armv7m", true)
@@ -2011,9 +2012,11 @@ MachOObjectFile::getAnyRelocationSection(
const MachO::any_relocation_info &RE) const {
if (isRelocationScattered(RE) || getPlainRelocationExternal(RE))
return *section_end();
- unsigned SecNum = getPlainRelocationSymbolNum(RE) - 1;
+ unsigned SecNum = getPlainRelocationSymbolNum(RE);
+ if (SecNum == MachO::R_ABS || SecNum > Sections.size())
+ return *section_end();
DataRefImpl DRI;
- DRI.d.a = SecNum;
+ DRI.d.a = SecNum - 1;
return SectionRef(DRI, this);
}
diff --git a/lib/Object/RecordStreamer.h b/lib/Object/RecordStreamer.h
index d8610610c332..d694a9fb8b0d 100644
--- a/lib/Object/RecordStreamer.h
+++ b/lib/Object/RecordStreamer.h
@@ -38,5 +38,5 @@ public:
void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
unsigned ByteAlignment) override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Option/OptTable.cpp b/lib/Option/OptTable.cpp
index 96ba1836f4cd..c37f193fa64e 100644
--- a/lib/Option/OptTable.cpp
+++ b/lib/Option/OptTable.cpp
@@ -79,8 +79,8 @@ static inline bool operator<(const OptTable::Info &A, const OptTable::Info &B) {
static inline bool operator<(const OptTable::Info &I, const char *Name) {
return StrCmpOptionNameIgnoreCase(I.Name, Name) < 0;
}
-}
-}
+} // namespace opt
+} // namespace llvm
OptSpecifier::OptSpecifier(const Option *Opt) : ID(Opt->getID()) {}
diff --git a/lib/ProfileData/CMakeLists.txt b/lib/ProfileData/CMakeLists.txt
index 282760f0e66b..22cca4b44df5 100644
--- a/lib/ProfileData/CMakeLists.txt
+++ b/lib/ProfileData/CMakeLists.txt
@@ -11,4 +11,7 @@ add_llvm_library(LLVMProfileData
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/ProfileData
+
+ DEPENDS
+ intrinsics_gen
)
diff --git a/lib/ProfileData/CoverageMapping.cpp b/lib/ProfileData/CoverageMapping.cpp
index bbac5c26b1eb..b6c2489bd5c6 100644
--- a/lib/ProfileData/CoverageMapping.cpp
+++ b/lib/ProfileData/CoverageMapping.cpp
@@ -19,6 +19,7 @@
#include "llvm/ProfileData/CoverageMappingReader.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Errc.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"
@@ -154,11 +155,11 @@ ErrorOr<int64_t> CounterMappingContext::evaluate(const Counter &C) const {
return 0;
case Counter::CounterValueReference:
if (C.getCounterID() >= CounterValues.size())
- return std::make_error_code(std::errc::argument_out_of_domain);
+ return make_error_code(errc::argument_out_of_domain);
return CounterValues[C.getCounterID()];
case Counter::Expression: {
if (C.getExpressionID() >= Expressions.size())
- return std::make_error_code(std::errc::argument_out_of_domain);
+ return make_error_code(errc::argument_out_of_domain);
const auto &E = Expressions[C.getExpressionID()];
ErrorOr<int64_t> LHS = evaluate(E.LHS);
if (!LHS)
@@ -349,7 +350,7 @@ public:
return Segments;
}
};
-}
+} // namespace
std::vector<StringRef> CoverageMapping::getUniqueSourceFiles() const {
std::vector<StringRef> Filenames;
@@ -520,7 +521,7 @@ class CoverageMappingErrorCategoryType : public std::error_category {
llvm_unreachable("A value of coveragemap_error has no message.");
}
};
-}
+} // namespace
static ManagedStatic<CoverageMappingErrorCategoryType> ErrorCategory;
diff --git a/lib/ProfileData/CoverageMappingReader.cpp b/lib/ProfileData/CoverageMappingReader.cpp
index ec531c3753ec..32de0babcb2f 100644
--- a/lib/ProfileData/CoverageMappingReader.cpp
+++ b/lib/ProfileData/CoverageMappingReader.cpp
@@ -315,7 +315,7 @@ struct SectionData {
return std::error_code();
}
};
-}
+} // namespace
template <typename T, support::endianness Endian>
std::error_code readCoverageMappingData(
diff --git a/lib/ProfileData/CoverageMappingWriter.cpp b/lib/ProfileData/CoverageMappingWriter.cpp
index d90d2f565155..128003c270d7 100644
--- a/lib/ProfileData/CoverageMappingWriter.cpp
+++ b/lib/ProfileData/CoverageMappingWriter.cpp
@@ -74,7 +74,7 @@ public:
return C;
}
};
-}
+} // namespace
/// \brief Encode the counter.
///
diff --git a/lib/ProfileData/InstrProf.cpp b/lib/ProfileData/InstrProf.cpp
index 92822a71402f..805d6d16aace 100644
--- a/lib/ProfileData/InstrProf.cpp
+++ b/lib/ProfileData/InstrProf.cpp
@@ -54,7 +54,7 @@ class InstrProfErrorCategoryType : public std::error_category {
llvm_unreachable("A value of instrprof_error has no message.");
}
};
-}
+} // namespace
static ManagedStatic<InstrProfErrorCategoryType> ErrorCategory;
diff --git a/lib/ProfileData/InstrProfIndexed.h b/lib/ProfileData/InstrProfIndexed.h
index ebca7b22fbfb..afd8cfb74306 100644
--- a/lib/ProfileData/InstrProfIndexed.h
+++ b/lib/ProfileData/InstrProfIndexed.h
@@ -49,7 +49,7 @@ static inline uint64_t ComputeHash(HashT Type, StringRef K) {
const uint64_t Magic = 0x8169666f72706cff; // "\xfflprofi\x81"
const uint64_t Version = 2;
const HashT HashType = HashT::MD5;
-}
+} // namespace IndexedInstrProf
} // end namespace llvm
diff --git a/lib/ProfileData/InstrProfWriter.cpp b/lib/ProfileData/InstrProfWriter.cpp
index 2188543ed61c..efac2926b6cf 100644
--- a/lib/ProfileData/InstrProfWriter.cpp
+++ b/lib/ProfileData/InstrProfWriter.cpp
@@ -69,7 +69,7 @@ public:
}
}
};
-}
+} // namespace
std::error_code
InstrProfWriter::addFunctionCounts(StringRef FunctionName,
diff --git a/lib/ProfileData/SampleProf.cpp b/lib/ProfileData/SampleProf.cpp
index 920c48a24640..e2894c64be01 100644
--- a/lib/ProfileData/SampleProf.cpp
+++ b/lib/ProfileData/SampleProf.cpp
@@ -42,7 +42,7 @@ class SampleProfErrorCategoryType : public std::error_category {
llvm_unreachable("A value of sampleprof_error has no message.");
}
};
-}
+} // namespace
static ManagedStatic<SampleProfErrorCategoryType> ErrorCategory;
diff --git a/lib/Support/APFloat.cpp b/lib/Support/APFloat.cpp
index 4b0a0e5d4819..48830e83e9a4 100644
--- a/lib/Support/APFloat.cpp
+++ b/lib/Support/APFloat.cpp
@@ -90,7 +90,7 @@ namespace llvm {
const unsigned int maxPowerOfFiveExponent = maxExponent + maxPrecision - 1;
const unsigned int maxPowerOfFiveParts = 2 + ((maxPowerOfFiveExponent * 815)
/ (351 * integerPartWidth));
-}
+} // namespace llvm
/* A bunch of private, handy routines. */
@@ -3539,7 +3539,7 @@ namespace {
exp += FirstSignificant;
buffer.erase(&buffer[0], &buffer[FirstSignificant]);
}
-}
+} // namespace
void APFloat::toString(SmallVectorImpl<char> &Str,
unsigned FormatPrecision,
diff --git a/lib/Support/APInt.cpp b/lib/Support/APInt.cpp
index 23f89bb66f9e..aa026d49c070 100644
--- a/lib/Support/APInt.cpp
+++ b/lib/Support/APInt.cpp
@@ -2331,7 +2331,7 @@ namespace {
{
return findFirstSet(value, ZB_Max);
}
-}
+} // namespace
/* Sets the least significant part of a bignum to the input value, and
zeroes out higher parts. */
diff --git a/lib/Support/ARMBuildAttrs.cpp b/lib/Support/ARMBuildAttrs.cpp
index 960a0f13c674..9c8bb15dc0ae 100644
--- a/lib/Support/ARMBuildAttrs.cpp
+++ b/lib/Support/ARMBuildAttrs.cpp
@@ -66,7 +66,7 @@ const struct {
{ ARMBuildAttrs::ABI_align_needed, "Tag_ABI_align8_needed" },
{ ARMBuildAttrs::ABI_align_preserved, "Tag_ABI_align8_preserved" },
};
-}
+} // namespace
namespace llvm {
namespace ARMBuildAttrs {
@@ -90,6 +90,6 @@ int AttrTypeFromString(StringRef Tag) {
return ARMAttributeTags[TI].Attr;
return -1;
}
-}
-}
+} // namespace ARMBuildAttrs
+} // namespace llvm
diff --git a/lib/Support/ARMWinEH.cpp b/lib/Support/ARMWinEH.cpp
index 03c150f1150b..8d21ca5698c1 100644
--- a/lib/Support/ARMWinEH.cpp
+++ b/lib/Support/ARMWinEH.cpp
@@ -32,7 +32,7 @@ std::pair<uint16_t, uint32_t> SavedRegisterMask(const RuntimeFunction &RF) {
return std::make_pair(GPRMask, VFPMask);
}
-}
-}
-}
+} // namespace WinEH
+} // namespace ARM
+} // namespace llvm
diff --git a/lib/Support/Allocator.cpp b/lib/Support/Allocator.cpp
index f48edac0598c..021037a2b3dd 100644
--- a/lib/Support/Allocator.cpp
+++ b/lib/Support/Allocator.cpp
@@ -37,4 +37,4 @@ void PrintRecyclerStats(size_t Size,
<< "Number of elements free for recycling: " << FreeListSize << '\n';
}
-}
+} // namespace llvm
diff --git a/lib/Support/CMakeLists.txt b/lib/Support/CMakeLists.txt
index 79aae1584357..eac189b67a47 100644
--- a/lib/Support/CMakeLists.txt
+++ b/lib/Support/CMakeLists.txt
@@ -83,6 +83,7 @@ add_llvm_library(LLVMSupport
StringExtras.cpp
StringMap.cpp
StringPool.cpp
+ StringSaver.cpp
StringRef.cpp
SystemUtils.cpp
TargetParser.cpp
diff --git a/lib/Support/CommandLine.cpp b/lib/Support/CommandLine.cpp
index 3cabc54a73aa..3638f0df5e2d 100644
--- a/lib/Support/CommandLine.cpp
+++ b/lib/Support/CommandLine.cpp
@@ -32,6 +32,7 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/StringSaver.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdlib>
#include <map>
@@ -60,8 +61,8 @@ TEMPLATE_INSTANTIATION(class opt<int>);
TEMPLATE_INSTANTIATION(class opt<std::string>);
TEMPLATE_INSTANTIATION(class opt<char>);
TEMPLATE_INSTANTIATION(class opt<bool>);
-}
-} // end namespace llvm::cl
+} // namespace cl
+} // namespace llvm
// Pin the vtables to this file.
void GenericOptionValue::anchor() {}
@@ -78,7 +79,6 @@ void parser<double>::anchor() {}
void parser<float>::anchor() {}
void parser<std::string>::anchor() {}
void parser<char>::anchor() {}
-void StringSaver::anchor() {}
//===----------------------------------------------------------------------===//
@@ -564,7 +564,7 @@ void cl::TokenizeGNUCommandLine(StringRef Src, StringSaver &Saver,
// End the token if this is whitespace.
if (isWhitespace(Src[I])) {
if (!Token.empty())
- NewArgv.push_back(Saver.SaveString(Token.c_str()));
+ NewArgv.push_back(Saver.save(Token.c_str()));
Token.clear();
continue;
}
@@ -575,7 +575,7 @@ void cl::TokenizeGNUCommandLine(StringRef Src, StringSaver &Saver,
// Append the last token after hitting EOF with no whitespace.
if (!Token.empty())
- NewArgv.push_back(Saver.SaveString(Token.c_str()));
+ NewArgv.push_back(Saver.save(Token.c_str()));
// Mark the end of response files
if (MarkEOLs)
NewArgv.push_back(nullptr);
@@ -656,7 +656,7 @@ void cl::TokenizeWindowsCommandLine(StringRef Src, StringSaver &Saver,
if (State == UNQUOTED) {
// Whitespace means the end of the token.
if (isWhitespace(Src[I])) {
- NewArgv.push_back(Saver.SaveString(Token.c_str()));
+ NewArgv.push_back(Saver.save(Token.c_str()));
Token.clear();
State = INIT;
// Mark the end of lines in response files
@@ -691,7 +691,7 @@ void cl::TokenizeWindowsCommandLine(StringRef Src, StringSaver &Saver,
}
// Append the last token after hitting EOF with no whitespace.
if (!Token.empty())
- NewArgv.push_back(Saver.SaveString(Token.c_str()));
+ NewArgv.push_back(Saver.save(Token.c_str()));
// Mark the end of response files
if (MarkEOLs)
NewArgv.push_back(nullptr);
@@ -779,26 +779,6 @@ bool cl::ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
return AllExpanded;
}
-namespace {
-class StrDupSaver : public StringSaver {
- std::vector<char *> Dups;
-
-public:
- ~StrDupSaver() override {
- for (std::vector<char *>::iterator I = Dups.begin(), E = Dups.end(); I != E;
- ++I) {
- char *Dup = *I;
- free(Dup);
- }
- }
- const char *SaveString(const char *Str) override {
- char *Dup = strdup(Str);
- Dups.push_back(Dup);
- return Dup;
- }
-};
-}
-
/// ParseEnvironmentOptions - An alternative entry point to the
/// CommandLine library, which allows you to read the program's name
/// from the caller (as PROGNAME) and its command-line arguments from
@@ -818,8 +798,9 @@ void cl::ParseEnvironmentOptions(const char *progName, const char *envVar,
// Get program's "name", which we wouldn't know without the caller
// telling us.
SmallVector<const char *, 20> newArgv;
- StrDupSaver Saver;
- newArgv.push_back(Saver.SaveString(progName));
+ BumpPtrAllocator A;
+ BumpPtrStringSaver Saver(A);
+ newArgv.push_back(Saver.save(progName));
// Parse the value of the environment variable into a "command line"
// and hand it off to ParseCommandLineOptions().
@@ -840,7 +821,8 @@ void CommandLineParser::ParseCommandLineOptions(int argc,
// Expand response files.
SmallVector<const char *, 20> newArgv(argv, argv + argc);
- StrDupSaver Saver;
+ BumpPtrAllocator A;
+ BumpPtrStringSaver Saver(A);
ExpandResponseFiles(Saver, TokenizeGNUCommandLine, newArgv);
argv = &newArgv[0];
argc = static_cast<int>(newArgv.size());
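With StrDupSaver gone, both entry points pair a BumpPtrAllocator with a BumpPtrStringSaver: saved copies live until the allocator dies, with no per-string free. The call-site pattern, as in the two hunks above (a sketch assuming the declarations in StringSaver.h at this revision; the expanded pointers must not outlive A):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Support/Allocator.h"
    #include "llvm/Support/CommandLine.h"
    #include "llvm/Support/StringSaver.h"
    void parseWithResponseFiles(int argc, const char **argv) {
      llvm::BumpPtrAllocator A;            // owns every saved string
      llvm::BumpPtrStringSaver Saver(A);
      llvm::SmallVector<const char *, 20> Argv(argv, argv + argc);
      llvm::cl::ExpandResponseFiles(Saver, llvm::cl::TokenizeGNUCommandLine, Argv);
      // ... consume Argv while A is still in scope ...
    }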
diff --git a/lib/Support/CrashRecoveryContext.cpp b/lib/Support/CrashRecoveryContext.cpp
index aba0f1ddeee8..929f5dacd729 100644
--- a/lib/Support/CrashRecoveryContext.cpp
+++ b/lib/Support/CrashRecoveryContext.cpp
@@ -60,7 +60,7 @@ public:
}
};
-}
+} // namespace
static ManagedStatic<sys::Mutex> gCrashRecoveryContextMutex;
static bool gCrashRecoveryEnabled = false;
diff --git a/lib/Support/DAGDeltaAlgorithm.cpp b/lib/Support/DAGDeltaAlgorithm.cpp
index f1a334bfc7be..0f447808cc4d 100644
--- a/lib/Support/DAGDeltaAlgorithm.cpp
+++ b/lib/Support/DAGDeltaAlgorithm.cpp
@@ -175,7 +175,7 @@ public:
: DDAI(DDAI), Required(Required) {}
};
-}
+} // namespace
DAGDeltaAlgorithmImpl::DAGDeltaAlgorithmImpl(
DAGDeltaAlgorithm &DDA, const changeset_ty &Changes,
diff --git a/lib/Support/DataStream.cpp b/lib/Support/DataStream.cpp
index c24315526cff..ad05494f9c67 100644
--- a/lib/Support/DataStream.cpp
+++ b/lib/Support/DataStream.cpp
@@ -16,6 +16,7 @@
#include "llvm/Support/DataStream.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Program.h"
#include <string>
@@ -71,18 +72,15 @@ public:
}
};
-}
+} // namespace
-namespace llvm {
-DataStreamer *getDataFileStreamer(const std::string &Filename,
- std::string *StrError) {
- DataFileStreamer *s = new DataFileStreamer();
+std::unique_ptr<DataStreamer>
+llvm::getDataFileStreamer(const std::string &Filename, std::string *StrError) {
+ std::unique_ptr<DataFileStreamer> s = make_unique<DataFileStreamer>();
if (std::error_code e = s->OpenFile(Filename)) {
*StrError = std::string("Could not open ") + Filename + ": " +
e.message() + "\n";
return nullptr;
}
- return s;
-}
-
+ return std::move(s);
}
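Returning std::unique_ptr makes the transfer of ownership explicit and lets the error path simply return nullptr while reporting through StrError. A typical caller against the new signature (a sketch, assuming the declarations from DataStream.h at this revision):

    #include "llvm/Support/DataStream.h"
    #include "llvm/Support/raw_ostream.h"
    #include <memory>
    #include <string>
    bool openStream(const std::string &Path) {
      std::string Err;
      std::unique_ptr<llvm::DataStreamer> S =
          llvm::getDataFileStreamer(Path, &Err);
      if (!S) {                  // failure already described in Err
        llvm::errs() << Err;
        return false;
      }
      return true;               // S owns the open file from here on
    }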
diff --git a/lib/Support/Debug.cpp b/lib/Support/Debug.cpp
index 47751fce3fcd..2052662ab1bf 100644
--- a/lib/Support/Debug.cpp
+++ b/lib/Support/Debug.cpp
@@ -99,7 +99,7 @@ struct DebugOnlyOpt {
}
};
-}
+} // namespace
static DebugOnlyOpt DebugOnlyOptLoc;
diff --git a/lib/Support/FileOutputBuffer.cpp b/lib/Support/FileOutputBuffer.cpp
index 307ff09afedc..6f064c983611 100644
--- a/lib/Support/FileOutputBuffer.cpp
+++ b/lib/Support/FileOutputBuffer.cpp
@@ -109,4 +109,4 @@ std::error_code FileOutputBuffer::commit() {
// Rename file to final name.
return sys::fs::rename(Twine(TempPath), Twine(FinalPath));
}
-} // namespace
+} // namespace llvm
diff --git a/lib/Support/Locale.cpp b/lib/Support/Locale.cpp
index 35ddf7f11bf6..d5cb72b5db3a 100644
--- a/lib/Support/Locale.cpp
+++ b/lib/Support/Locale.cpp
@@ -15,7 +15,7 @@ int columnWidth(StringRef Text) {
bool isPrint(int UCS) {
#if LLVM_ON_WIN32
- // Restrict characters that we'll try to print to the the lower part of ASCII
+ // Restrict characters that we'll try to print to the lower part of ASCII
// except for the control characters (0x20 - 0x7E). In general one can not
// reliably output code points U+0080 and higher using narrow character C/C++
// output functions in Windows, because the meaning of the upper 128 codes is
diff --git a/lib/Support/MD5.cpp b/lib/Support/MD5.cpp
index ceab580984d4..6ed81fbe49e0 100644
--- a/lib/Support/MD5.cpp
+++ b/lib/Support/MD5.cpp
@@ -283,4 +283,4 @@ void MD5::stringifyResult(MD5Result &Result, SmallString<32> &Str) {
Res << format("%.2x", Result[i]);
}
-}
+} // namespace llvm
diff --git a/lib/Support/MathExtras.cpp b/lib/Support/MathExtras.cpp
index ba0924540ceb..9265a43d38c3 100644
--- a/lib/Support/MathExtras.cpp
+++ b/lib/Support/MathExtras.cpp
@@ -29,4 +29,4 @@ namespace llvm {
const float huge_valf = HUGE_VALF;
#endif
-}
+} // namespace llvm
diff --git a/lib/Support/MemoryBuffer.cpp b/lib/Support/MemoryBuffer.cpp
index 98862e96b749..1d69b9692c24 100644
--- a/lib/Support/MemoryBuffer.cpp
+++ b/lib/Support/MemoryBuffer.cpp
@@ -94,7 +94,7 @@ public:
return MemoryBuffer_Malloc;
}
};
-}
+} // namespace
static ErrorOr<std::unique_ptr<MemoryBuffer>>
getFileAux(const Twine &Filename, int64_t FileSize, uint64_t MapSize,
@@ -220,7 +220,7 @@ public:
return MemoryBuffer_MMap;
}
};
-}
+} // namespace
static ErrorOr<std::unique_ptr<MemoryBuffer>>
getMemoryBufferForStream(int FD, const Twine &BufferName) {
diff --git a/lib/Support/Mutex.cpp b/lib/Support/Mutex.cpp
index c8d3844d0c96..42867c94b737 100644
--- a/lib/Support/Mutex.cpp
+++ b/lib/Support/Mutex.cpp
@@ -110,7 +110,7 @@ MutexImpl::tryacquire()
return errorcode == 0;
}
-}
+} // namespace llvm
#elif defined(LLVM_ON_UNIX)
#include "Unix/Mutex.inc"
diff --git a/lib/Support/RWMutex.cpp b/lib/Support/RWMutex.cpp
index 3b6309cef21a..21ba5a428e6f 100644
--- a/lib/Support/RWMutex.cpp
+++ b/lib/Support/RWMutex.cpp
@@ -113,7 +113,7 @@ RWMutexImpl::writer_release()
return errorcode == 0;
}
-}
+} // namespace llvm
#elif defined(LLVM_ON_UNIX)
#include "Unix/RWMutex.inc"
diff --git a/lib/Support/SourceMgr.cpp b/lib/Support/SourceMgr.cpp
index d5e3157b064e..6d44a4d51f60 100644
--- a/lib/Support/SourceMgr.cpp
+++ b/lib/Support/SourceMgr.cpp
@@ -332,8 +332,8 @@ static bool isNonASCII(char c) {
return c & 0x80;
}
-void SMDiagnostic::print(const char *ProgName, raw_ostream &S,
- bool ShowColors) const {
+void SMDiagnostic::print(const char *ProgName, raw_ostream &S, bool ShowColors,
+ bool ShowKindLabel) const {
// Display colors only if OS supports colors.
ShowColors &= S.has_colors();
@@ -357,27 +357,29 @@ void SMDiagnostic::print(const char *ProgName, raw_ostream &S,
S << ": ";
}
- switch (Kind) {
- case SourceMgr::DK_Error:
- if (ShowColors)
- S.changeColor(raw_ostream::RED, true);
- S << "error: ";
- break;
- case SourceMgr::DK_Warning:
- if (ShowColors)
- S.changeColor(raw_ostream::MAGENTA, true);
- S << "warning: ";
- break;
- case SourceMgr::DK_Note:
- if (ShowColors)
- S.changeColor(raw_ostream::BLACK, true);
- S << "note: ";
- break;
- }
+ if (ShowKindLabel) {
+ switch (Kind) {
+ case SourceMgr::DK_Error:
+ if (ShowColors)
+ S.changeColor(raw_ostream::RED, true);
+ S << "error: ";
+ break;
+ case SourceMgr::DK_Warning:
+ if (ShowColors)
+ S.changeColor(raw_ostream::MAGENTA, true);
+ S << "warning: ";
+ break;
+ case SourceMgr::DK_Note:
+ if (ShowColors)
+ S.changeColor(raw_ostream::BLACK, true);
+ S << "note: ";
+ break;
+ }
- if (ShowColors) {
- S.resetColor();
- S.changeColor(raw_ostream::SAVEDCOLOR, true);
+ if (ShowColors) {
+ S.resetColor();
+ S.changeColor(raw_ostream::SAVEDCOLOR, true);
+ }
}
S << Message << '\n';
diff --git a/lib/Support/Statistic.cpp b/lib/Support/Statistic.cpp
index 56c3b0f5659f..90f5fdb019e7 100644
--- a/lib/Support/Statistic.cpp
+++ b/lib/Support/Statistic.cpp
@@ -60,7 +60,7 @@ public:
Stats.push_back(S);
}
};
-}
+} // namespace
static ManagedStatic<StatisticInfo> StatInfo;
static ManagedStatic<sys::SmartMutex<true> > StatLock;
diff --git a/lib/Support/StreamingMemoryObject.cpp b/lib/Support/StreamingMemoryObject.cpp
index 6c5652af04c2..891aa665e2c5 100644
--- a/lib/Support/StreamingMemoryObject.cpp
+++ b/lib/Support/StreamingMemoryObject.cpp
@@ -123,9 +123,10 @@ MemoryObject *getNonStreamedMemoryObject(const unsigned char *Start,
return new RawMemoryObject(Start, End);
}
-StreamingMemoryObject::StreamingMemoryObject(DataStreamer *streamer) :
- Bytes(kChunkSize), Streamer(streamer), BytesRead(0), BytesSkipped(0),
- ObjectSize(0), EOFReached(false) {
- BytesRead = streamer->GetBytes(&Bytes[0], kChunkSize);
-}
+StreamingMemoryObject::StreamingMemoryObject(
+ std::unique_ptr<DataStreamer> Streamer)
+ : Bytes(kChunkSize), Streamer(std::move(Streamer)), BytesRead(0),
+ BytesSkipped(0), ObjectSize(0), EOFReached(false) {
+ BytesRead = this->Streamer->GetBytes(&Bytes[0], kChunkSize);
}
+} // namespace llvm
diff --git a/lib/Support/StringSaver.cpp b/lib/Support/StringSaver.cpp
new file mode 100644
index 000000000000..d6b84e53dccd
--- /dev/null
+++ b/lib/Support/StringSaver.cpp
@@ -0,0 +1,19 @@
+//===-- StringSaver.cpp ---------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/StringSaver.h"
+
+using namespace llvm;
+
+const char *StringSaver::saveImpl(StringRef S) {
+ char *P = Alloc.Allocate<char>(S.size() + 1);
+ memcpy(P, S.data(), S.size());
+ P[S.size()] = '\0';
+ return P;
+}
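saveImpl copies the bytes into the bump allocator and NUL-terminates them, so the result is safe to hand to C APIs and lives as long as the allocator. A standalone model of that contract (malloc standing in for Alloc.Allocate<char>; in the real arena the storage is released wholesale, never per string):

    #include <cstdlib>
    #include <cstring>
    const char *saveCopy(const char *S) {
      std::size_t N = std::strlen(S);
      char *P = static_cast<char *>(std::malloc(N + 1)); // arena allocation in LLVM
      std::memcpy(P, S, N);
      P[N] = '\0';                                       // guaranteed terminator
      return P;
    }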
diff --git a/lib/Support/TargetParser.cpp b/lib/Support/TargetParser.cpp
index 757483b95864..760cdc11f26a 100644
--- a/lib/Support/TargetParser.cpp
+++ b/lib/Support/TargetParser.cpp
@@ -276,7 +276,8 @@ bool ARMTargetParser::getFPUFeatures(unsigned FPUKind,
// FPU version subtarget features are inclusive of lower-numbered ones, so
// enable the one corresponding to this version and disable all that are
- // higher.
+ // higher. We also have to make sure to disable fp16 when vfp4 is disabled,
+ // as +vfp4 implies +fp16 but -vfp4 does not imply -fp16.
switch (FPUNames[FPUKind].FPUVersion) {
case 5:
Features.push_back("+fp-armv8");
@@ -287,18 +288,21 @@ bool ARMTargetParser::getFPUFeatures(unsigned FPUKind,
break;
case 3:
Features.push_back("+vfp3");
+ Features.push_back("-fp16");
Features.push_back("-vfp4");
Features.push_back("-fp-armv8");
break;
case 2:
Features.push_back("+vfp2");
Features.push_back("-vfp3");
+ Features.push_back("-fp16");
Features.push_back("-vfp4");
Features.push_back("-fp-armv8");
break;
case 0:
Features.push_back("-vfp2");
Features.push_back("-vfp3");
+ Features.push_back("-fp16");
Features.push_back("-vfp4");
Features.push_back("-fp-armv8");
break;
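Because the FPU tiers are inclusive upward, disabling a tier must also explicitly strip features the higher tier would have implied; hence the new -fp16 beside every -vfp4. For example, selecting a VFPv3 FPU now yields the feature list below (values taken verbatim from the hunk above):

    // Subtarget features pushed for FPUVersion == 3 after this change.
    #include <string>
    #include <vector>
    void addVFP3Features(std::vector<std::string> &F) {
      F.insert(F.end(), {"+vfp3", "-fp16", "-vfp4", "-fp-armv8"});
    }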
diff --git a/lib/Support/TimeValue.cpp b/lib/Support/TimeValue.cpp
index 136b93eceefa..caa5b5aa7e53 100644
--- a/lib/Support/TimeValue.cpp
+++ b/lib/Support/TimeValue.cpp
@@ -45,7 +45,7 @@ TimeValue::normalize( void ) {
}
}
-}
+} // namespace llvm
/// Include the platform-specific portion of TimeValue class
#ifdef LLVM_ON_UNIX
diff --git a/lib/Support/Timer.cpp b/lib/Support/Timer.cpp
index d7b65155d6ef..0ad253bec371 100644
--- a/lib/Support/Timer.cpp
+++ b/lib/Support/Timer.cpp
@@ -50,7 +50,7 @@ namespace {
InfoOutputFilename("info-output-file", cl::value_desc("filename"),
cl::desc("File to append -stats and -timer output to"),
cl::Hidden, cl::location(getLibSupportInfoOutputFilename()));
-}
+} // namespace
// CreateInfoOutputFile - Return a file stream to print our output on.
raw_ostream *llvm::CreateInfoOutputFile() {
@@ -218,7 +218,7 @@ public:
}
};
-}
+} // namespace
static ManagedStatic<Name2TimerMap> NamedTimers;
static ManagedStatic<Name2PairMap> NamedGroupedTimers;
diff --git a/lib/Support/Triple.cpp b/lib/Support/Triple.cpp
index ad99386e6574..072d4a0d79d8 100644
--- a/lib/Support/Triple.cpp
+++ b/lib/Support/Triple.cpp
@@ -59,6 +59,7 @@ const char *Triple::getArchTypeName(ArchType Kind) {
case spir: return "spir";
case spir64: return "spir64";
case kalimba: return "kalimba";
+ case shave: return "shave";
}
llvm_unreachable("Invalid ArchType!");
@@ -120,6 +121,7 @@ const char *Triple::getArchTypePrefix(ArchType Kind) {
case spir:
case spir64: return "spir";
case kalimba: return "kalimba";
+ case shave: return "shave";
}
}
@@ -252,6 +254,7 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) {
.Case("spir", spir)
.Case("spir64", spir64)
.Case("kalimba", kalimba)
+ .Case("shave", shave)
.Default(UnknownArch);
}
@@ -356,6 +359,7 @@ static Triple::ArchType parseArch(StringRef ArchName) {
.Case("spir", Triple::spir)
.Case("spir64", Triple::spir64)
.StartsWith("kalimba", Triple::kalimba)
+ .Case("shave", Triple::shave)
.Default(Triple::UnknownArch);
}
@@ -1004,6 +1008,7 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) {
case llvm::Triple::hsail:
case llvm::Triple::spir:
case llvm::Triple::kalimba:
+ case llvm::Triple::shave:
return 32;
case llvm::Triple::aarch64:
@@ -1075,6 +1080,7 @@ Triple Triple::get32BitArchVariant() const {
case Triple::thumbeb:
case Triple::x86:
case Triple::xcore:
+ case Triple::shave:
// Already 32-bit.
break;
@@ -1107,6 +1113,7 @@ Triple Triple::get64BitArchVariant() const {
case Triple::thumbeb:
case Triple::xcore:
case Triple::sparcel:
+ case Triple::shave:
T.setArch(UnknownArch);
break;
diff --git a/lib/Support/Unix/Process.inc b/lib/Support/Unix/Process.inc
index df13bd221739..b15cedd7f6dc 100644
--- a/lib/Support/Unix/Process.inc
+++ b/lib/Support/Unix/Process.inc
@@ -205,7 +205,7 @@ private:
int &FD;
bool KeepOpen;
};
-}
+} // namespace
std::error_code Process::FixupStandardFileDescriptors() {
int NullFD = -1;
diff --git a/lib/Support/Unix/Program.inc b/lib/Support/Unix/Program.inc
index 5816fb812e9f..dc633ab313e9 100644
--- a/lib/Support/Unix/Program.inc
+++ b/lib/Support/Unix/Program.inc
@@ -20,6 +20,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Config/config.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Errc.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
@@ -92,7 +93,7 @@ ErrorOr<std::string> sys::findProgramByName(StringRef Name,
if (sys::fs::can_execute(FilePath.c_str()))
return std::string(FilePath.str()); // Found the executable!
}
- return std::errc::no_such_file_or_directory;
+ return errc::no_such_file_or_directory;
}
static bool RedirectIO(const StringRef *Path, int FD, std::string* ErrMsg) {
@@ -175,7 +176,7 @@ static void SetMemoryLimits (unsigned size)
#endif
}
-}
+} // namespace llvm
static bool Execute(ProcessInfo &PI, StringRef Program, const char **args,
const char **envp, const StringRef **redirects,
@@ -447,7 +448,7 @@ llvm::sys::writeFileWithEncoding(StringRef FileName, StringRef Contents,
OS << Contents;
if (OS.has_error())
- return std::make_error_code(std::errc::io_error);
+ return make_error_code(errc::io_error);
return EC;
}
@@ -472,4 +473,4 @@ bool llvm::sys::argumentsFitWithinSystemLimits(ArrayRef<const char*> Args) {
}
return true;
}
-}
+} // namespace llvm
diff --git a/lib/Support/Unix/ThreadLocal.inc b/lib/Support/Unix/ThreadLocal.inc
index 31c3f3835b29..a04dd3ee402b 100644
--- a/lib/Support/Unix/ThreadLocal.inc
+++ b/lib/Support/Unix/ThreadLocal.inc
@@ -56,7 +56,7 @@ void ThreadLocalImpl::removeInstance() {
setInstance(nullptr);
}
-}
+} // namespace llvm
#else
namespace llvm {
using namespace sys;
diff --git a/lib/Support/Unix/TimeValue.inc b/lib/Support/Unix/TimeValue.inc
index 042e0dacc346..2c4f04c04f12 100644
--- a/lib/Support/Unix/TimeValue.inc
+++ b/lib/Support/Unix/TimeValue.inc
@@ -51,4 +51,4 @@ TimeValue TimeValue::now() {
NANOSECONDS_PER_MICROSECOND ) );
}
-}
+} // namespace llvm
diff --git a/lib/Support/Unix/Watchdog.inc b/lib/Support/Unix/Watchdog.inc
index 5d89c0e51b11..9e335aaa8ca7 100644
--- a/lib/Support/Unix/Watchdog.inc
+++ b/lib/Support/Unix/Watchdog.inc
@@ -28,5 +28,5 @@ namespace llvm {
alarm(0);
#endif
}
- }
-}
+ } // namespace sys
+} // namespace llvm
diff --git a/lib/Support/Windows/Memory.inc b/lib/Support/Windows/Memory.inc
index ae8371abf5b3..4b2ff2e2d324 100644
--- a/lib/Support/Windows/Memory.inc
+++ b/lib/Support/Windows/Memory.inc
@@ -78,7 +78,15 @@ MemoryBlock Memory::allocateMappedMemory(size_t NumBytes,
// While we'd be happy to allocate single pages, the Windows allocation
// granularity may be larger than a single page (in practice, it is 64K)
// so mapping less than that will create an unreachable fragment of memory.
- static const size_t Granularity = getAllocationGranularity();
+ // Avoid using one-time initialization of static locals here, since they
+ // aren't thread safe with MSVC.
+ static volatile size_t GranularityCached;
+ size_t Granularity = GranularityCached;
+ if (Granularity == 0) {
+ Granularity = getAllocationGranularity();
+ GranularityCached = Granularity;
+ }
+
const size_t NumBlocks = (NumBytes+Granularity-1)/Granularity;
uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
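MSVC at this point did not emit thread-safe initialization for function-local statics, so the hunk caches the granularity by hand. The race is benign: any thread that loses still computes the same value, so the last store wins harmlessly. The pattern in isolation (a sketch; queryAllocationGranularity is a hypothetical stand-in for the OS query):

    #include <cstddef>
    std::size_t queryAllocationGranularity();  // assumed expensive OS query
    std::size_t cachedGranularity() {
      static volatile std::size_t Cached;      // zero-initialized, no init guard
      std::size_t G = Cached;
      if (G == 0) {
        G = queryAllocationGranularity();      // may run on several threads
        Cached = G;                            // identical value either way
      }
      return G;
    }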
diff --git a/lib/Support/Windows/Program.inc b/lib/Support/Windows/Program.inc
index 75685de45547..c29d8729b1de 100644
--- a/lib/Support/Windows/Program.inc
+++ b/lib/Support/Windows/Program.inc
@@ -14,6 +14,7 @@
#include "WindowsSupport.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/Errc.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/WindowsError.h"
#include "llvm/Support/raw_ostream.h"
@@ -514,7 +515,7 @@ llvm::sys::writeFileWithEncoding(StringRef FileName, StringRef Contents,
}
if (OS.has_error())
- return std::make_error_code(std::errc::io_error);
+ return make_error_code(errc::io_error);
return EC;
}
diff --git a/lib/Support/YAMLParser.cpp b/lib/Support/YAMLParser.cpp
index d55da5ef1e4a..5ca28a052068 100644
--- a/lib/Support/YAMLParser.cpp
+++ b/lib/Support/YAMLParser.cpp
@@ -144,8 +144,8 @@ struct Token : ilist_node<Token> {
Token() : Kind(TK_Error) {}
};
-}
-}
+} // namespace yaml
+} // namespace llvm
namespace llvm {
template<>
@@ -178,7 +178,7 @@ struct ilist_node_traits<Token> {
BumpPtrAllocator Alloc;
};
-}
+} // namespace llvm
typedef ilist<Token> TokenQueueT;
@@ -203,7 +203,7 @@ struct SimpleKey {
return Tok == Other.Tok;
}
};
-}
+} // namespace
/// @brief The Unicode scalar value of a UTF-8 minimal well-formed code unit
/// subsequence and the subsequence's length in code units (uint8_t).
diff --git a/lib/TableGen/TGLexer.h b/lib/TableGen/TGLexer.h
index cbc30be8a572..d97d1caf6b88 100644
--- a/lib/TableGen/TGLexer.h
+++ b/lib/TableGen/TGLexer.h
@@ -60,7 +60,7 @@ namespace tgtok {
// String valued tokens.
Id, StrVal, VarName, CodeFragment
};
-}
+} // namespace tgtok
/// TGLexer - TableGen Lexer class.
class TGLexer {
diff --git a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index bffd9e6e8c76..6c5a083b393d 100644
--- a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -148,7 +148,7 @@ private:
Color getColor(unsigned Register);
Chain *getAndEraseNext(Color PreferredColor, std::vector<Chain*> &L);
};
-}
+} // namespace
char AArch64A57FPLoadBalancing::ID = 0;
diff --git a/lib/Target/AArch64/AArch64AsmPrinter.cpp b/lib/Target/AArch64/AArch64AsmPrinter.cpp
index da22d8d9e4c5..ada995bad37e 100644
--- a/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -121,7 +121,7 @@ private:
//===----------------------------------------------------------------------===//
void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) {
- Triple TT(TM.getTargetTriple());
+ const Triple &TT = TM.getTargetTriple();
if (TT.isOSBinFormatMachO()) {
// Funny Darwin hack: This flag tells the linker that no global symbols
// contain code that falls through to other global symbols (e.g. the obvious
diff --git a/lib/Target/AArch64/AArch64BranchRelaxation.cpp b/lib/Target/AArch64/AArch64BranchRelaxation.cpp
index d973234dd86a..176403ce124a 100644
--- a/lib/Target/AArch64/AArch64BranchRelaxation.cpp
+++ b/lib/Target/AArch64/AArch64BranchRelaxation.cpp
@@ -102,7 +102,7 @@ public:
}
};
char AArch64BranchRelaxation::ID = 0;
-}
+} // namespace
/// verify - check BBOffsets, BBSizes, alignment of islands
void AArch64BranchRelaxation::verify() {
diff --git a/lib/Target/AArch64/AArch64CallingConvention.h b/lib/Target/AArch64/AArch64CallingConvention.h
index 1e2d1c3b93bd..efc328a37e5f 100644
--- a/lib/Target/AArch64/AArch64CallingConvention.h
+++ b/lib/Target/AArch64/AArch64CallingConvention.h
@@ -136,6 +136,6 @@ static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
return finishStackBlock(PendingMembers, LocVT, ArgFlags, State, SlotAlign);
}
-}
+} // namespace
#endif
diff --git a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
index 06ff9af37fd7..11eefc4ff63d 100644
--- a/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
+++ b/lib/Target/AArch64/AArch64CleanupLocalDynamicTLSPass.cpp
@@ -135,7 +135,7 @@ struct LDTLSCleanup : public MachineFunctionPass {
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
char LDTLSCleanup::ID = 0;
FunctionPass *llvm::createAArch64CleanupLocalDynamicTLSPass() {
diff --git a/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index c2470f747a38..acb35251fc6d 100644
--- a/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -43,7 +43,7 @@ private:
unsigned BitSize);
};
char AArch64ExpandPseudo::ID = 0;
-}
+} // namespace
/// \brief Transfer implicit operands on the pseudo instruction to the
/// instructions created from the expansion.
diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index 9977e2b84a73..d1523e8548e2 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -1678,6 +1678,9 @@ unsigned AArch64FastISel::emitAnd_ri(MVT RetVT, unsigned LHSReg, bool LHSIsKill,
unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
bool WantZExt, MachineMemOperand *MMO) {
+ if (!TLI.allowsMisalignedMemoryAccesses(VT))
+ return 0;
+
// Simplify this down to something we can handle.
if (!simplifyAddress(Addr, VT))
return 0;
@@ -1962,6 +1965,9 @@ bool AArch64FastISel::selectLoad(const Instruction *I) {
bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
MachineMemOperand *MMO) {
+ if (!TLI.allowsMisalignedMemoryAccesses(VT))
+ return false;
+
// Simplify this down to something we can handle.
if (!simplifyAddress(Addr, VT))
return false;
diff --git a/lib/Target/AArch64/AArch64FrameLowering.h b/lib/Target/AArch64/AArch64FrameLowering.h
index b496fccba349..11227eeaf3d7 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/lib/Target/AArch64/AArch64FrameLowering.h
@@ -63,6 +63,6 @@ public:
RegScavenger *RS) const override;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1616ff13535d..0165ef9c49c0 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -76,9 +76,6 @@ cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
cl::init(false));
-/// Value type used for condition codes.
-static const MVT MVT_CC = MVT::i32;
-
AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
const AArch64Subtarget &STI)
: TargetLowering(TM), Subtarget(&STI) {
@@ -810,9 +807,6 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
case AArch64ISD::ADCS: return "AArch64ISD::ADCS";
case AArch64ISD::SBCS: return "AArch64ISD::SBCS";
case AArch64ISD::ANDS: return "AArch64ISD::ANDS";
- case AArch64ISD::CCMP: return "AArch64ISD::CCMP";
- case AArch64ISD::CCMN: return "AArch64ISD::CCMN";
- case AArch64ISD::FCCMP: return "AArch64ISD::FCCMP";
case AArch64ISD::FCMP: return "AArch64ISD::FCMP";
case AArch64ISD::FMIN: return "AArch64ISD::FMIN";
case AArch64ISD::FMAX: return "AArch64ISD::FMAX";
@@ -1171,133 +1165,10 @@ static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
LHS = LHS.getOperand(0);
}
- return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT_CC), LHS, RHS)
+ return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS)
.getValue(1);
}
-static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
- ISD::CondCode CC, SDValue CCOp,
- SDValue Condition, unsigned NZCV,
- SDLoc DL, SelectionDAG &DAG) {
- unsigned Opcode = 0;
- if (LHS.getValueType().isFloatingPoint())
- Opcode = AArch64ISD::FCCMP;
- else if (RHS.getOpcode() == ISD::SUB) {
- SDValue SubOp0 = RHS.getOperand(0);
- if (const ConstantSDNode *SubOp0C = dyn_cast<ConstantSDNode>(SubOp0))
- if (SubOp0C->isNullValue() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
- // See emitComparison() on why we can only do this for SETEQ and SETNE.
- Opcode = AArch64ISD::CCMN;
- RHS = RHS.getOperand(1);
- }
- }
- if (Opcode == 0)
- Opcode = AArch64ISD::CCMP;
-
- SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
- return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp);
-}
-
-/// Returns true if @p Val is a tree of AND/OR/SETCC operations.
-static bool isConjunctionDisjunctionTree(const SDValue Val, unsigned Depth) {
- if (!Val.hasOneUse())
- return false;
- if (Val->getOpcode() == ISD::SETCC)
- return true;
- // Protect against stack overflow.
- if (Depth > 1000)
- return false;
- if (Val->getOpcode() == ISD::AND || Val->getOpcode() == ISD::OR) {
- SDValue O0 = Val->getOperand(0);
- SDValue O1 = Val->getOperand(1);
- return isConjunctionDisjunctionTree(O0, Depth+1) &&
- isConjunctionDisjunctionTree(O1, Depth+1);
- }
- return false;
-}
-
-/// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain
-/// of CCMP/CFCMP ops. For example (SETCC_0 & SETCC_1) with condition cond0 and
-/// cond1 can be transformed into "CMP; CCMP" with CCMP executing on cond_0
-/// and setting flags to inversed(cond_1) otherwise.
-/// This recursive function produces DAG nodes that produce condition flags
-/// suitable to determine the truth value of @p Val (which is AND/OR/SETCC)
-/// by testing the result for the condition set to @p OutCC. If @p Negate is
-/// set the opposite truth value is produced. If @p CCOp and @p Condition are
-/// given then conditional comparison are created so that false is reported
-/// when they are false.
-static SDValue emitConjunctionDisjunctionTree(
- SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC, bool Negate,
- SDValue CCOp = SDValue(), AArch64CC::CondCode Condition = AArch64CC::AL) {
- assert(isConjunctionDisjunctionTree(Val, 0));
- // We're at a tree leaf, produce a c?f?cmp.
- unsigned Opcode = Val->getOpcode();
- if (Opcode == ISD::SETCC) {
- SDValue LHS = Val->getOperand(0);
- SDValue RHS = Val->getOperand(1);
- ISD::CondCode CC = cast<CondCodeSDNode>(Val->getOperand(2))->get();
- bool isInteger = LHS.getValueType().isInteger();
- if (Negate)
- CC = getSetCCInverse(CC, isInteger);
- SDLoc DL(Val);
- // Determine OutCC and handle FP special case.
- if (isInteger) {
- OutCC = changeIntCCToAArch64CC(CC);
- } else {
- assert(LHS.getValueType().isFloatingPoint());
- AArch64CC::CondCode ExtraCC;
- changeFPCCToAArch64CC(CC, OutCC, ExtraCC);
- // Surpisingly some floating point conditions can't be tested with a
- // single condition code. Construct an additional comparison in this case.
- // See comment below on how we deal with OR conditions.
- if (ExtraCC != AArch64CC::AL) {
- SDValue ExtraCmp;
- if (!CCOp.getNode())
- ExtraCmp = emitComparison(LHS, RHS, CC, DL, DAG);
- else {
- SDValue ConditionOp = DAG.getConstant(Condition, DL, MVT_CC);
- // Note that we want the inverse of ExtraCC, so NZCV is not inversed.
- unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(ExtraCC);
- ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, ConditionOp,
- NZCV, DL, DAG);
- }
- CCOp = ExtraCmp;
- Condition = AArch64CC::getInvertedCondCode(ExtraCC);
- OutCC = AArch64CC::getInvertedCondCode(OutCC);
- }
- }
-
- // Produce a normal comparison if we are first in the chain
- if (!CCOp.getNode())
- return emitComparison(LHS, RHS, CC, DL, DAG);
- // Otherwise produce a ccmp.
- SDValue ConditionOp = DAG.getConstant(Condition, DL, MVT_CC);
- AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC);
- unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC);
- return emitConditionalComparison(LHS, RHS, CC, CCOp, ConditionOp, NZCV, DL,
- DAG);
- }
-
- // Construct comparison sequence for the left hand side.
- SDValue LHS = Val->getOperand(0);
- SDValue RHS = Val->getOperand(1);
-
- // We can only implement AND-like behaviour here, but negation is free. So we
- // use (not (and (not x) (not y))) to implement (or x y).
- bool isOr = Val->getOpcode() == ISD::OR;
- assert((isOr || Val->getOpcode() == ISD::AND) && "Should have AND or OR.");
- Negate ^= isOr;
-
- AArch64CC::CondCode RHSCC;
- SDValue CmpR =
- emitConjunctionDisjunctionTree(DAG, RHS, RHSCC, isOr, CCOp, Condition);
- SDValue CmpL =
- emitConjunctionDisjunctionTree(DAG, LHS, OutCC, isOr, CmpR, RHSCC);
- if (Negate)
- OutCC = AArch64CC::getInvertedCondCode(OutCC);
- return CmpL;
-}
-
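The deleted lowering above leans on a simple boolean identity: CCMP chains can only express AND-like combinations, so an OR is implemented by negating through De Morgan's law, with the final negation folded into the output condition code. A minimal standalone sketch of that identity (plain C++, not part of the patch; orViaAndNot is a hypothetical name):

#include <cassert>

// (not (and (not x) (not y))) == (or x y); in the lowering, the outer
// negation is realized by inverting OutCC rather than by an extra op.
static bool orViaAndNot(bool X, bool Y) {
  return !(!X && !Y);
}

int main() {
  for (int X = 0; X < 2; ++X)
    for (int Y = 0; Y < 2; ++Y)
      assert(orViaAndNot(X, Y) == (X || Y));
  return 0;
}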
static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &AArch64cc, SelectionDAG &DAG, SDLoc dl) {
SDValue Cmp;
@@ -1356,55 +1227,47 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
}
}
}
+ // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095.
+ // For the i8 operand, the largest immediate is 255, so this can be easily
+ // encoded in the compare instruction. For the i16 operand, however, the
+ // largest immediate cannot be encoded in the compare.
+ // Therefore, use a sign extending load and cmn to avoid materializing the -1
+ // constant. For example,
+ // movz w1, #65535
+ // ldrh w0, [x0, #0]
+ // cmp w0, w1
+ // >
+ // ldrsh w0, [x0, #0]
+ // cmn w0, #1
+ // Fundamentally, we're relying on the property that (zext LHS) == (zext RHS)
+ // if and only if (sext LHS) == (sext RHS). The checks are in place to ensure
+ // both the LHS and RHS are truly zero extended and to make sure the
+ // transformation is profitable.
if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) {
- const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS);
-
- // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095.
- // For the i8 operand, the largest immediate is 255, so this can be easily
- // encoded in the compare instruction. For the i16 operand, however, the
- // largest immediate cannot be encoded in the compare.
- // Therefore, use a sign extending load and cmn to avoid materializing the
- // -1 constant. For example,
- // movz w1, #65535
- // ldrh w0, [x0, #0]
- // cmp w0, w1
- // >
- // ldrsh w0, [x0, #0]
- // cmn w0, #1
- // Fundamentally, we're relying on the property that (zext LHS) == (zext RHS)
- // if and only if (sext LHS) == (sext RHS). The checks are in place to
- // ensure both the LHS and RHS are truly zero extended and to make sure the
- // transformation is profitable.
- if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) &&
- cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
- cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
- LHS.getNode()->hasNUsesOfValue(1, 0)) {
- int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
- if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
- SDValue SExt =
- DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
- DAG.getValueType(MVT::i16));
- Cmp = emitComparison(SExt, DAG.getConstant(ValueofRHS, dl,
- RHS.getValueType()),
- CC, dl, DAG);
- AArch64CC = changeIntCCToAArch64CC(CC);
- goto CreateCCNode;
+ if ((cast<ConstantSDNode>(RHS)->getZExtValue() >> 16 == 0) &&
+ isa<LoadSDNode>(LHS)) {
+ if (cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
+ cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
+ LHS.getNode()->hasNUsesOfValue(1, 0)) {
+ int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
+ if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
+ SDValue SExt =
+ DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
+ DAG.getValueType(MVT::i16));
+ Cmp = emitComparison(SExt,
+ DAG.getConstant(ValueofRHS, dl,
+ RHS.getValueType()),
+ CC, dl, DAG);
+ AArch64CC = changeIntCCToAArch64CC(CC);
+ AArch64cc = DAG.getConstant(AArch64CC, dl, MVT::i32);
+ return Cmp;
+ }
}
}
-
- if ((RHSC->isNullValue() || RHSC->isOne()) &&
- isConjunctionDisjunctionTree(LHS, 0)) {
- bool Negate = (CC == ISD::SETNE) ^ RHSC->isNullValue();
- Cmp = emitConjunctionDisjunctionTree(DAG, LHS, AArch64CC, Negate);
- goto CreateCCNode;
- }
}
-
Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
AArch64CC = changeIntCCToAArch64CC(CC);
-
-CreateCCNode:
- AArch64cc = DAG.getConstant(AArch64CC, dl, MVT_CC);
+ AArch64cc = DAG.getConstant(AArch64CC, dl, MVT::i32);
return Cmp;
}
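The comment block in getAArch64Cmp above asserts that, for the i16 case, (zext LHS) == (zext RHS) holds if and only if (sext LHS) == (sext RHS). A quick exhaustive check of that property (my standalone illustration, not part of the patch):

#include <cassert>
#include <cstdint>

// For every 16-bit value V: (zext V) == 65535 exactly when (sext V) == -1,
// which is what lets "cmp" against a materialized 65535 become
// "cmn w0, #1" after a sign-extending load.
int main() {
  for (uint32_t V = 0; V <= 0xFFFF; ++V) {
    bool ZExtEq = (V == 0xFFFFu);
    bool SExtEq = (int32_t)(int16_t)V == -1;
    assert(ZExtEq == SExtEq);
  }
  return 0;
}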
@@ -1561,7 +1424,7 @@ static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) {
ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
- // The the values aren't constants, this isn't the pattern we're looking for.
+ // The values aren't constants, this isn't the pattern we're looking for.
if (!CFVal || !CTVal)
return Op;
@@ -2559,7 +2422,7 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
// cannot rely on the linker replacing the tail call with a return.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
const GlobalValue *GV = G->getGlobal();
- const Triple TT(getTargetMachine().getTargetTriple());
+ const Triple &TT = getTargetMachine().getTargetTriple();
if (GV->hasExternalWeakLinkage() &&
(!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
return false;
@@ -3557,7 +3420,7 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
EltVT = MVT::i64;
VecVT = MVT::v2i64;
- // We want to materialize a mask with the the high bit set, but the AdvSIMD
+ // We want to materialize a mask with the high bit set, but the AdvSIMD
// immediate moves cannot materialize that in a single instruction for
// 64-bit elements. Instead, materialize zero and then negate it.
EltMask = 0;
@@ -7580,21 +7443,26 @@ static SDValue tryCombineFixedPointConvert(SDNode *N,
//
// This routine does the actual conversion of such DUPs, once outer routines
// have determined that everything else is in order.
+// It also supports immediate DUP-like nodes (MOVI/MVNi), which we can fold
+// similarly here.
static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
- // We can handle most types of duplicate, but the lane ones have an extra
- // operand saying *which* lane, so we need to know.
- bool IsDUPLANE;
switch (N.getOpcode()) {
case AArch64ISD::DUP:
- IsDUPLANE = false;
- break;
case AArch64ISD::DUPLANE8:
case AArch64ISD::DUPLANE16:
case AArch64ISD::DUPLANE32:
case AArch64ISD::DUPLANE64:
- IsDUPLANE = true;
+ case AArch64ISD::MOVI:
+ case AArch64ISD::MOVIshift:
+ case AArch64ISD::MOVIedit:
+ case AArch64ISD::MOVImsl:
+ case AArch64ISD::MVNIshift:
+ case AArch64ISD::MVNImsl:
break;
default:
+ // FMOV could be supported, but isn't very useful, as it would only occur
+ // if you passed a bitcast'd floating point immediate to an eligible long
+ // integer op (addl, smull, ...).
return SDValue();
}
@@ -7604,17 +7472,11 @@ static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
MVT ElementTy = NarrowTy.getVectorElementType();
unsigned NumElems = NarrowTy.getVectorNumElements();
- MVT NewDUPVT = MVT::getVectorVT(ElementTy, NumElems * 2);
+ MVT NewVT = MVT::getVectorVT(ElementTy, NumElems * 2);
SDLoc dl(N);
- SDValue NewDUP;
- if (IsDUPLANE)
- NewDUP = DAG.getNode(N.getOpcode(), dl, NewDUPVT, N.getOperand(0),
- N.getOperand(1));
- else
- NewDUP = DAG.getNode(AArch64ISD::DUP, dl, NewDUPVT, N.getOperand(0));
-
- return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NarrowTy, NewDUP,
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NarrowTy,
+ DAG.getNode(N->getOpcode(), dl, NewVT, N->ops()),
DAG.getConstant(NumElems, dl, MVT::i64));
}
@@ -8913,6 +8775,14 @@ static SDValue performSelectCCCombine(SDNode *N, SelectionDAG &DAG) {
return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), LHS, RHS);
}
+/// Get rid of unnecessary NVCASTs (that don't change the type).
+static SDValue performNVCASTCombine(SDNode *N) {
+ if (N->getValueType(0) == N->getOperand(0).getValueType())
+ return N->getOperand(0);
+
+ return SDValue();
+}
+
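performNVCASTCombine is the usual identity-cast fold: when the cast's result type already matches its operand's type, the node is a no-op and its operand can stand in for it. A toy model of the fold (standalone C++ with a hypothetical Node type, not the SDNode API):

#include <cassert>

struct Node {
  int Type;
  Node *Operand;
};

// Returns the operand when the cast changes nothing, mirroring the combine
// above; nullptr means "no fold applies".
static Node *foldIdentityCast(Node *Cast) {
  if (Cast->Type == Cast->Operand->Type)
    return Cast->Operand;
  return nullptr;
}

int main() {
  Node Src{1, nullptr};
  Node CastSame{1, &Src};
  Node CastDiff{2, &Src};
  assert(foldIdentityCast(&CastSame) == &Src);
  assert(foldIdentityCast(&CastDiff) == nullptr);
  return 0;
}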
SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -8955,6 +8825,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
return performCONDCombine(N, DCI, DAG, 2, 3);
case AArch64ISD::DUP:
return performPostLD1Combine(N, DCI, false);
+ case AArch64ISD::NVCAST:
+ return performNVCASTCombine(N);
case ISD::INSERT_VECTOR_ELT:
return performPostLD1Combine(N, DCI, true);
case ISD::INTRINSIC_VOID:
@@ -9260,8 +9132,3 @@ bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
return Ty->isArrayTy();
}
-
-bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
- EVT) const {
- return false;
-}
diff --git a/lib/Target/AArch64/AArch64ISelLowering.h b/lib/Target/AArch64/AArch64ISelLowering.h
index db192c78169a..da42376ac250 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/lib/Target/AArch64/AArch64ISelLowering.h
@@ -58,11 +58,6 @@ enum NodeType : unsigned {
SBCS,
ANDS,
- // Conditional compares. Operands: left,right,falsecc,cc,flags
- CCMP,
- CCMN,
- FCCMP,
-
// Floating point comparison
FCMP,
@@ -513,8 +508,6 @@ private:
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
CallingConv::ID CallConv,
bool isVarArg) const override;
-
- bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
};
namespace AArch64 {
diff --git a/lib/Target/AArch64/AArch64InstrFormats.td b/lib/Target/AArch64/AArch64InstrFormats.td
index 1fe9c7f8cc5a..2c52f340d6d1 100644
--- a/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/lib/Target/AArch64/AArch64InstrFormats.td
@@ -525,13 +525,6 @@ def imm0_31 : Operand<i64>, ImmLeaf<i64, [{
let ParserMatchClass = Imm0_31Operand;
}
-// True if the 32-bit immediate is in the range [0,31]
-def imm32_0_31 : Operand<i32>, ImmLeaf<i32, [{
- return ((uint64_t)Imm) < 32;
-}]> {
- let ParserMatchClass = Imm0_31Operand;
-}
-
// imm0_15 predicate - True if the immediate is in the range [0,15]
def imm0_15 : Operand<i64>, ImmLeaf<i64, [{
return ((uint64_t)Imm) < 16;
@@ -549,9 +542,7 @@ def imm0_7 : Operand<i64>, ImmLeaf<i64, [{
// imm32_0_15 predicate - True if the 32-bit immediate is in the range [0,15]
def imm32_0_15 : Operand<i32>, ImmLeaf<i32, [{
return ((uint32_t)Imm) < 16;
-}]> {
- let ParserMatchClass = Imm0_15Operand;
-}
+}]>;
// An arithmetic shifter operand:
// {7-6} - shift type: 00 = lsl, 01 = lsr, 10 = asr
@@ -2077,12 +2068,9 @@ multiclass LogicalRegS<bits<2> opc, bit N, string mnemonic,
//---
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
-class BaseCondComparisonImm<bit op, RegisterClass regtype, ImmLeaf immtype,
- string mnemonic, SDNode OpNode>
- : I<(outs), (ins regtype:$Rn, immtype:$imm, imm32_0_15:$nzcv, ccode:$cond),
- mnemonic, "\t$Rn, $imm, $nzcv, $cond", "",
- [(set NZCV, (OpNode regtype:$Rn, immtype:$imm, (i32 imm:$nzcv),
- (i32 imm:$cond), NZCV))]>,
+class BaseCondSetFlagsImm<bit op, RegisterClass regtype, string asm>
+ : I<(outs), (ins regtype:$Rn, imm0_31:$imm, imm0_15:$nzcv, ccode:$cond),
+ asm, "\t$Rn, $imm, $nzcv, $cond", "", []>,
Sched<[WriteI, ReadI]> {
let Uses = [NZCV];
let Defs = [NZCV];
@@ -2102,13 +2090,19 @@ class BaseCondComparisonImm<bit op, RegisterClass regtype, ImmLeaf immtype,
let Inst{3-0} = nzcv;
}
+multiclass CondSetFlagsImm<bit op, string asm> {
+ def Wi : BaseCondSetFlagsImm<op, GPR32, asm> {
+ let Inst{31} = 0;
+ }
+ def Xi : BaseCondSetFlagsImm<op, GPR64, asm> {
+ let Inst{31} = 1;
+ }
+}
+
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
-class BaseCondComparisonReg<bit op, RegisterClass regtype, string mnemonic,
- SDNode OpNode>
- : I<(outs), (ins regtype:$Rn, regtype:$Rm, imm32_0_15:$nzcv, ccode:$cond),
- mnemonic, "\t$Rn, $Rm, $nzcv, $cond", "",
- [(set NZCV, (OpNode regtype:$Rn, regtype:$Rm, (i32 imm:$nzcv),
- (i32 imm:$cond), NZCV))]>,
+class BaseCondSetFlagsReg<bit op, RegisterClass regtype, string asm>
+ : I<(outs), (ins regtype:$Rn, regtype:$Rm, imm0_15:$nzcv, ccode:$cond),
+ asm, "\t$Rn, $Rm, $nzcv, $cond", "", []>,
Sched<[WriteI, ReadI, ReadI]> {
let Uses = [NZCV];
let Defs = [NZCV];
@@ -2128,19 +2122,11 @@ class BaseCondComparisonReg<bit op, RegisterClass regtype, string mnemonic,
let Inst{3-0} = nzcv;
}
-multiclass CondComparison<bit op, string mnemonic, SDNode OpNode> {
- // immediate operand variants
- def Wi : BaseCondComparisonImm<op, GPR32, imm32_0_31, mnemonic, OpNode> {
+multiclass CondSetFlagsReg<bit op, string asm> {
+ def Wr : BaseCondSetFlagsReg<op, GPR32, asm> {
let Inst{31} = 0;
}
- def Xi : BaseCondComparisonImm<op, GPR64, imm0_31, mnemonic, OpNode> {
- let Inst{31} = 1;
- }
- // register operand variants
- def Wr : BaseCondComparisonReg<op, GPR32, mnemonic, OpNode> {
- let Inst{31} = 0;
- }
- def Xr : BaseCondComparisonReg<op, GPR64, mnemonic, OpNode> {
+ def Xr : BaseCondSetFlagsReg<op, GPR64, asm> {
let Inst{31} = 1;
}
}
@@ -3948,14 +3934,11 @@ multiclass FPComparison<bit signalAllNans, string asm,
//---
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
-class BaseFPCondComparison<bit signalAllNans, RegisterClass regtype,
- string mnemonic, list<dag> pat>
- : I<(outs), (ins regtype:$Rn, regtype:$Rm, imm32_0_15:$nzcv, ccode:$cond),
- mnemonic, "\t$Rn, $Rm, $nzcv, $cond", "", pat>,
+class BaseFPCondComparison<bit signalAllNans,
+ RegisterClass regtype, string asm>
+ : I<(outs), (ins regtype:$Rn, regtype:$Rm, imm0_15:$nzcv, ccode:$cond),
+ asm, "\t$Rn, $Rm, $nzcv, $cond", "", []>,
Sched<[WriteFCmp]> {
- let Uses = [NZCV];
- let Defs = [NZCV];
-
bits<5> Rn;
bits<5> Rm;
bits<4> nzcv;
@@ -3971,18 +3954,16 @@ class BaseFPCondComparison<bit signalAllNans, RegisterClass regtype,
let Inst{3-0} = nzcv;
}
-multiclass FPCondComparison<bit signalAllNans, string mnemonic,
- SDPatternOperator OpNode = null_frag> {
- def Srr : BaseFPCondComparison<signalAllNans, FPR32, mnemonic,
- [(set NZCV, (OpNode (f32 FPR32:$Rn), (f32 FPR32:$Rm), (i32 imm:$nzcv),
- (i32 imm:$cond), NZCV))]> {
+multiclass FPCondComparison<bit signalAllNans, string asm> {
+ let Defs = [NZCV], Uses = [NZCV] in {
+ def Srr : BaseFPCondComparison<signalAllNans, FPR32, asm> {
let Inst{22} = 0;
}
- def Drr : BaseFPCondComparison<signalAllNans, FPR64, mnemonic,
- [(set NZCV, (OpNode (f64 FPR64:$Rn), (f64 FPR64:$Rm), (i32 imm:$nzcv),
- (i32 imm:$cond), NZCV))]> {
+
+ def Drr : BaseFPCondComparison<signalAllNans, FPR64, asm> {
let Inst{22} = 1;
}
+ } // Defs = [NZCV], Uses = [NZCV]
}
//---
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 6941a6bf1b47..8d8864cfe65f 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -255,7 +255,7 @@ unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
void AArch64InstrInfo::instantiateCondBranch(
MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
+ ArrayRef<MachineOperand> Cond) const {
if (Cond[0].getImm() != -1) {
// Regular Bcc
BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
@@ -272,7 +272,7 @@ void AArch64InstrInfo::instantiateCondBranch(
unsigned AArch64InstrInfo::InsertBranch(
MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
+ ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
@@ -369,7 +369,7 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
}
bool AArch64InstrInfo::canInsertSelect(
- const MachineBasicBlock &MBB, const SmallVectorImpl<MachineOperand> &Cond,
+ const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
int &FalseCycles) const {
// Check register classes.
@@ -412,7 +412,7 @@ bool AArch64InstrInfo::canInsertSelect(
void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DstReg,
- const SmallVectorImpl<MachineOperand> &Cond,
+ ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
@@ -629,8 +629,8 @@ AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
// base registers are identical, and the offset of a lower memory access +
// the width doesn't overlap the offset of a higher memory access,
// then the memory accesses are different.
- if (getLdStBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
- getLdStBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
+ if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
+ getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
if (BaseRegA == BaseRegB) {
int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
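The disjointness test described in the comment above, written out as a standalone predicate (my illustration, not part of the patch):

#include <cassert>

// With identical base registers, two accesses cannot overlap when the
// lower access ends at or before the higher one begins.
static bool offsetsDisjoint(int OffsetA, int WidthA, int OffsetB, int WidthB) {
  int LowOffset  = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int LowWidth   = OffsetA < OffsetB ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}

int main() {
  assert(offsetsDisjoint(0, 8, 8, 8));  // [0,8) and [8,16) do not overlap
  assert(!offsetsDisjoint(0, 8, 4, 8)); // [0,8) and [4,12) overlap
  return 0;
}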
@@ -1310,9 +1310,9 @@ void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
}
bool
-AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
- const TargetRegisterInfo *TRI) const {
+AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
switch (LdSt->getOpcode()) {
default:
return false;
@@ -1336,7 +1336,7 @@ AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
};
}
-bool AArch64InstrInfo::getLdStBaseRegImmOfsWidth(
+bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
const TargetRegisterInfo *TRI) const {
// Handle only loads/stores with base register followed by immediate offset.
@@ -1434,7 +1434,7 @@ bool AArch64InstrInfo::getLdStBaseRegImmOfsWidth(
/// Detect opportunities for ldp/stp formation.
///
-/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
+/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
MachineInstr *SecondLdSt,
unsigned NumLoads) const {
@@ -1443,7 +1443,7 @@ bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
return false;
if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
return false;
- // getLdStBaseRegImmOfs guarantees that oper 2 isImm.
+ // getMemOpBaseRegImmOfs guarantees that operand 2 is an immediate.
unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
// Allow 6 bits of positive range.
if (Ofs1 > 64)
@@ -2459,15 +2459,15 @@ static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
return true;
}
-/// hasPattern - return true when there is potentially a faster code sequence
+/// Return true when there is potentially a faster code sequence
/// for an instruction chain ending in \p Root. All potential patterns are
/// listed in the \p Patterns vector. Patterns should be sorted in priority
/// order since the pattern evaluator stops checking as soon as it finds a
/// faster sequence.
-bool AArch64InstrInfo::hasPattern(
+bool AArch64InstrInfo::getMachineCombinerPatterns(
MachineInstr &Root,
- SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern) const {
+ SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
unsigned Opc = Root.getOpcode();
MachineBasicBlock &MBB = *Root.getParent();
bool Found = false;
@@ -2495,76 +2495,76 @@ bool AArch64InstrInfo::hasPattern(
"ADDWrr does not have register operands");
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
AArch64::WZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
+ Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
Found = true;
}
if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
AArch64::WZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
+ Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
Found = true;
}
break;
case AArch64::ADDXrr:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
AArch64::XZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
+ Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
Found = true;
}
if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
AArch64::XZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
+ Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
Found = true;
}
break;
case AArch64::SUBWrr:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
AArch64::WZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
+ Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
Found = true;
}
if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
AArch64::WZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
+ Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
Found = true;
}
break;
case AArch64::SUBXrr:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
AArch64::XZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
+ Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
Found = true;
}
if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
AArch64::XZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
+ Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
Found = true;
}
break;
case AArch64::ADDWri:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
AArch64::WZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
+ Patterns.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
Found = true;
}
break;
case AArch64::ADDXri:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
AArch64::XZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
+ Patterns.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
Found = true;
}
break;
case AArch64::SUBWri:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
AArch64::WZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
+ Patterns.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
Found = true;
}
break;
case AArch64::SUBXri:
if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
AArch64::XZR)) {
- Pattern.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
+ Patterns.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
Found = true;
}
break;
@@ -2667,7 +2667,7 @@ static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
return MUL;
}
-/// genAlternativeCodeSequence - when hasPattern() finds a pattern
+/// When getMachineCombinerPatterns() finds potential patterns,
/// this function generates the instructions that could replace the
/// original code sequence
void AArch64InstrInfo::genAlternativeCodeSequence(
diff --git a/lib/Target/AArch64/AArch64InstrInfo.h b/lib/Target/AArch64/AArch64InstrInfo.h
index d296768ab9b0..68c2a2882580 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/lib/Target/AArch64/AArch64InstrInfo.h
@@ -90,13 +90,13 @@ public:
/// Hint that pairing the given load or store is unprofitable.
void suppressLdStPair(MachineInstr *MI) const;
- bool getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
- unsigned &Offset,
- const TargetRegisterInfo *TRI) const override;
+ bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const override;
- bool getLdStBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
- int &Offset, int &Width,
- const TargetRegisterInfo *TRI) const;
+ bool getMemOpBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
+ int &Offset, int &Width,
+ const TargetRegisterInfo *TRI) const;
bool enableClusterLoads() const override { return true; }
@@ -140,17 +140,14 @@ public:
bool AllowModify = false) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
- bool canInsertSelect(const MachineBasicBlock &,
- const SmallVectorImpl<MachineOperand> &Cond, unsigned,
- unsigned, int &, int &, int &) const override;
+ bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
+ unsigned, unsigned, int &, int &, int &) const override;
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- DebugLoc DL, unsigned DstReg,
- const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL, unsigned DstReg, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const override;
void getNoopForMachoTarget(MCInst &NopInst) const override;
@@ -166,19 +163,17 @@ public:
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const override;
bool optimizeCondBranch(MachineInstr *MI) const override;
- /// hasPattern - return true when there is potentially a faster code sequence
+ /// Return true when there is potentially a faster code sequence
/// for an instruction chain ending in <Root>. All potential patterns are
- /// listed
- /// in the <Pattern> array.
- bool hasPattern(MachineInstr &Root,
- SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Pattern)
+ /// listed in the <Patterns> array.
+ bool getMachineCombinerPatterns(MachineInstr &Root,
+ SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns)
const override;
- /// genAlternativeCodeSequence - when hasPattern() finds a pattern
- /// this function generates the instructions that could replace the
- /// original code sequence
+ /// When getMachineCombinerPatterns() finds patterns, this function generates
+ /// the instructions that could replace the original code sequence
void genAlternativeCodeSequence(
- MachineInstr &Root, MachineCombinerPattern::MC_PATTERN P,
+ MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
@@ -189,7 +184,7 @@ public:
private:
void instantiateCondBranch(MachineBasicBlock &MBB, DebugLoc DL,
MachineBasicBlock *TBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ ArrayRef<MachineOperand> Cond) const;
};
/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
diff --git a/lib/Target/AArch64/AArch64InstrInfo.td b/lib/Target/AArch64/AArch64InstrInfo.td
index 2f1b8933bf61..653f80286b25 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/lib/Target/AArch64/AArch64InstrInfo.td
@@ -66,20 +66,6 @@ def SDT_AArch64CSel : SDTypeProfile<1, 4,
SDTCisSameAs<0, 2>,
SDTCisInt<3>,
SDTCisVT<4, i32>]>;
-def SDT_AArch64CCMP : SDTypeProfile<1, 5,
- [SDTCisVT<0, i32>,
- SDTCisInt<1>,
- SDTCisSameAs<1, 2>,
- SDTCisInt<3>,
- SDTCisInt<4>,
- SDTCisVT<5, i32>]>;
-def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
- [SDTCisVT<0, i32>,
- SDTCisFP<1>,
- SDTCisSameAs<1, 2>,
- SDTCisInt<3>,
- SDTCisInt<4>,
- SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp : SDTypeProfile<0, 2,
[SDTCisFP<0>,
SDTCisSameAs<0, 1>]>;
@@ -174,10 +160,6 @@ def AArch64and_flag : SDNode<"AArch64ISD::ANDS", SDTBinaryArithWithFlagsOut,
def AArch64adc_flag : SDNode<"AArch64ISD::ADCS", SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag : SDNode<"AArch64ISD::SBCS", SDTBinaryArithWithFlagsInOut>;
-def AArch64ccmp : SDNode<"AArch64ISD::CCMP", SDT_AArch64CCMP>;
-def AArch64ccmn : SDNode<"AArch64ISD::CCMN", SDT_AArch64CCMP>;
-def AArch64fccmp : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;
-
def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;
def AArch64fcmp : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
@@ -1036,10 +1018,13 @@ def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
//===----------------------------------------------------------------------===//
-// Conditional comparison instructions.
+// Conditionally set flags instructions.
//===----------------------------------------------------------------------===//
-defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
-defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;
+defm CCMN : CondSetFlagsImm<0, "ccmn">;
+defm CCMP : CondSetFlagsImm<1, "ccmp">;
+
+defm CCMN : CondSetFlagsReg<0, "ccmn">;
+defm CCMP : CondSetFlagsReg<1, "ccmp">;
//===----------------------------------------------------------------------===//
// Conditional select instructions.
@@ -2569,7 +2554,7 @@ defm FCMP : FPComparison<0, "fcmp", AArch64fcmp>;
//===----------------------------------------------------------------------===//
defm FCCMPE : FPCondComparison<1, "fccmpe">;
-defm FCCMP : FPCondComparison<0, "fccmp", AArch64fccmp>;
+defm FCCMP : FPCondComparison<0, "fccmp">;
//===----------------------------------------------------------------------===//
// Floating point conditional select instruction.
diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 186e71a3307c..82f77a77ab5e 100644
--- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -623,7 +623,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
// and first alias with the second, we can combine the second into the
// first.
if (!ModifiedRegs[MI->getOperand(0).getReg()] &&
- !UsedRegs[MI->getOperand(0).getReg()] &&
+ !(MI->mayLoad() && UsedRegs[MI->getOperand(0).getReg()]) &&
!mayAlias(MI, MemInsns, TII)) {
MergeForward = false;
return MBBI;
@@ -634,7 +634,8 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
// first and the second alias with the first, we can combine the first
// into the second.
if (!ModifiedRegs[FirstMI->getOperand(0).getReg()] &&
- !UsedRegs[FirstMI->getOperand(0).getReg()] &&
+ !(FirstMI->mayLoad() &&
+ UsedRegs[FirstMI->getOperand(0).getReg()]) &&
!mayAlias(FirstMI, MemInsns, TII)) {
MergeForward = true;
return MBBI;
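The relaxed condition above reads as: an intervening use of operand 0 blocks merging only for loads, where operand 0 is a definition; for a store it is merely the data source, so a use between the two instructions is harmless. A condensed sketch of the combined predicate (standalone C++; canMerge is a hypothetical helper, not part of the patch):

#include <cassert>

// Merge is legal when operand 0 is not redefined, not consumed as a load
// result between the candidates, and no intervening memory op aliases.
static bool canMerge(bool MayLoad, bool Reg0Modified, bool Reg0Used,
                     bool AliasesOtherMemOps) {
  return !Reg0Modified && !(MayLoad && Reg0Used) && !AliasesOtherMemOps;
}

int main() {
  assert(canMerge(/*MayLoad=*/false, false, /*Reg0Used=*/true, false));  // store: OK
  assert(!canMerge(/*MayLoad=*/true, false, /*Reg0Used=*/true, false));  // load: blocked
  return 0;
}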
diff --git a/lib/Target/AArch64/AArch64MCInstLower.h b/lib/Target/AArch64/AArch64MCInstLower.h
index 1e29b80c2d62..908f66f8e296 100644
--- a/lib/Target/AArch64/AArch64MCInstLower.h
+++ b/lib/Target/AArch64/AArch64MCInstLower.h
@@ -47,6 +47,6 @@ public:
MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const;
MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/AArch64/AArch64MachineFunctionInfo.h b/lib/Target/AArch64/AArch64MachineFunctionInfo.h
index 536a8d0f97a0..2a0f0a47b05c 100644
--- a/lib/Target/AArch64/AArch64MachineFunctionInfo.h
+++ b/lib/Target/AArch64/AArch64MachineFunctionInfo.h
@@ -158,6 +158,6 @@ private:
MILOHContainer LOHContainerSet;
SetOfInstructions LOHRelated;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp b/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
index 5394875a6bc1..bab84631f2b1 100644
--- a/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
+++ b/lib/Target/AArch64/AArch64PBQPRegAlloc.cpp
@@ -154,7 +154,7 @@ bool haveSameParity(unsigned reg1, unsigned reg2) {
return isOdd(reg1) == isOdd(reg2);
}
-}
+} // namespace
bool A57ChainingConstraint::addIntraChainConstraint(PBQPRAGraph &G, unsigned Rd,
unsigned Ra) {
diff --git a/lib/Target/AArch64/AArch64PBQPRegAlloc.h b/lib/Target/AArch64/AArch64PBQPRegAlloc.h
index 4f656f94ea12..c83aea452513 100644
--- a/lib/Target/AArch64/AArch64PBQPRegAlloc.h
+++ b/lib/Target/AArch64/AArch64PBQPRegAlloc.h
@@ -33,6 +33,6 @@ private:
// Add constraints between existing chains
void addInterChainConstraint(PBQPRAGraph &G, unsigned Rd, unsigned Ra);
};
-}
+} // namespace llvm
#endif // LLVM_LIB_TARGET_AARCH64_AARCH64PBQPREGALOC_H
diff --git a/lib/Target/AArch64/AArch64SelectionDAGInfo.h b/lib/Target/AArch64/AArch64SelectionDAGInfo.h
index 11932d2b1c22..a993b6059131 100644
--- a/lib/Target/AArch64/AArch64SelectionDAGInfo.h
+++ b/lib/Target/AArch64/AArch64SelectionDAGInfo.h
@@ -28,6 +28,6 @@ public:
unsigned Align, bool isVolatile,
MachinePointerInfo DstPtrInfo) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/AArch64/AArch64StorePairSuppress.cpp b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
index 85b44a20e11a..e8165a8e4085 100644
--- a/lib/Target/AArch64/AArch64StorePairSuppress.cpp
+++ b/lib/Target/AArch64/AArch64StorePairSuppress.cpp
@@ -57,7 +57,7 @@ private:
}
};
char AArch64StorePairSuppress::ID = 0;
-} // anonymous
+} // namespace
FunctionPass *llvm::createAArch64StorePairSuppressPass() {
return new AArch64StorePairSuppress();
@@ -142,7 +142,7 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
continue;
unsigned BaseReg;
unsigned Offset;
- if (TII->getLdStBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
+ if (TII->getMemOpBaseRegImmOfs(&MI, BaseReg, Offset, TRI)) {
if (PrevBaseReg == BaseReg) {
// If this block can take STPs, skip ahead to the next block.
if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
diff --git a/lib/Target/AArch64/AArch64Subtarget.cpp b/lib/Target/AArch64/AArch64Subtarget.cpp
index 0b97af80a6ad..554826b1e08a 100644
--- a/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -42,14 +42,12 @@ AArch64Subtarget::initializeSubtargetDependencies(StringRef FS) {
return *this;
}
-AArch64Subtarget::AArch64Subtarget(const std::string &TT,
- const std::string &CPU,
+AArch64Subtarget::AArch64Subtarget(const Triple &TT, const std::string &CPU,
const std::string &FS,
const TargetMachine &TM, bool LittleEndian)
: AArch64GenSubtargetInfo(TT, CPU, FS), ARMProcFamily(Others),
- HasV8_1aOps(false),
- HasFPARMv8(false), HasNEON(false), HasCrypto(false), HasCRC(false),
- HasZeroCycleRegMove(false), HasZeroCycleZeroing(false),
+ HasV8_1aOps(false), HasFPARMv8(false), HasNEON(false), HasCrypto(false),
+ HasCRC(false), HasZeroCycleRegMove(false), HasZeroCycleZeroing(false),
IsLittle(LittleEndian), CPUString(CPU), TargetTriple(TT), FrameLowering(),
InstrInfo(initializeSubtargetDependencies(FS)),
TSInfo(TM.getDataLayout()), TLInfo(TM, *this) {}
diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h
index 5454b205719e..c9b54cc3819c 100644
--- a/lib/Target/AArch64/AArch64Subtarget.h
+++ b/lib/Target/AArch64/AArch64Subtarget.h
@@ -29,6 +29,7 @@
namespace llvm {
class GlobalValue;
class StringRef;
+class Triple;
class AArch64Subtarget : public AArch64GenSubtargetInfo {
protected:
@@ -71,7 +72,7 @@ private:
public:
/// This constructor initializes the data members to match that
/// of the specified triple.
- AArch64Subtarget(const std::string &TT, const std::string &CPU,
+ AArch64Subtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM,
bool LittleEndian);
@@ -90,7 +91,7 @@ public:
}
const Triple &getTargetTriple() const { return TargetTriple; }
bool enableMachineScheduler() const override { return true; }
- bool enablePostMachineScheduler() const override {
+ bool enablePostRAScheduler() const override {
return isCortexA53() || isCortexA57();
}
@@ -150,6 +151,6 @@ public:
std::unique_ptr<PBQPRAConstraint> getCustomPBQPConstraints() const override;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/AArch64/AArch64TargetMachine.cpp b/lib/Target/AArch64/AArch64TargetMachine.cpp
index f23dd33d0146..5496a50f6b6e 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -110,9 +110,8 @@ static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
}
// Helper function to build a DataLayout string
-static std::string computeDataLayout(StringRef TT, bool LittleEndian) {
- Triple Triple(TT);
- if (Triple.isOSBinFormatMachO())
+static std::string computeDataLayout(const Triple &TT, bool LittleEndian) {
+ if (TT.isOSBinFormatMachO())
return "e-m:o-i64:64-i128:128-n32:64-S128";
if (LittleEndian)
return "e-m:e-i64:64-i128:128-n32:64-S128";
@@ -121,7 +120,7 @@ static std::string computeDataLayout(StringRef TT, bool LittleEndian) {
/// TargetMachine ctor - Create an AArch64 architecture model.
///
-AArch64TargetMachine::AArch64TargetMachine(const Target &T, StringRef TT,
+AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
@@ -131,7 +130,7 @@ AArch64TargetMachine::AArch64TargetMachine(const Target &T, StringRef TT,
// initialized before TLInfo is constructed.
: LLVMTargetMachine(T, computeDataLayout(TT, LittleEndian), TT, CPU, FS,
Options, RM, CM, OL),
- TLOF(createTLOF(Triple(getTargetTriple()))),
+ TLOF(createTLOF(getTargetTriple())),
isLittle(LittleEndian) {
initAsmInfo();
}
@@ -156,28 +155,27 @@ AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
// creation will depend on the TM and the code generation flags on the
// function that reside in TargetOptions.
resetTargetOptions(F);
- I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this, isLittle);
+ I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
+ isLittle);
}
return I.get();
}
void AArch64leTargetMachine::anchor() { }
-AArch64leTargetMachine::
-AArch64leTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
+AArch64leTargetMachine::AArch64leTargetMachine(
+ const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
void AArch64beTargetMachine::anchor() { }
-AArch64beTargetMachine::
-AArch64beTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
+AArch64beTargetMachine::AArch64beTargetMachine(
+ const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
namespace {
/// AArch64 Code Generator Pass Configuration Options.
@@ -269,7 +267,7 @@ bool AArch64PassConfig::addInstSelector() {
// For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many
// references to _TLS_MODULE_BASE_ as possible.
- if (Triple(TM->getTargetTriple()).isOSBinFormatELF() &&
+ if (TM->getTargetTriple().isOSBinFormatELF() &&
getOptLevel() != CodeGenOpt::None)
addPass(createAArch64CleanupLocalDynamicTLSPass());
@@ -324,6 +322,6 @@ void AArch64PassConfig::addPreEmitPass() {
// range of their destination.
addPass(createAArch64BranchRelaxation());
if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
- Triple(TM->getTargetTriple()).isOSBinFormatMachO())
+ TM->getTargetTriple().isOSBinFormatMachO())
addPass(createAArch64CollectLOHPass());
}
diff --git a/lib/Target/AArch64/AArch64TargetMachine.h b/lib/Target/AArch64/AArch64TargetMachine.h
index ec34fad97c8d..8d49a29386ac 100644
--- a/lib/Target/AArch64/AArch64TargetMachine.h
+++ b/lib/Target/AArch64/AArch64TargetMachine.h
@@ -27,7 +27,7 @@ protected:
mutable StringMap<std::unique_ptr<AArch64Subtarget>> SubtargetMap;
public:
- AArch64TargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ AArch64TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool IsLittleEndian);
@@ -54,7 +54,7 @@ private:
class AArch64leTargetMachine : public AArch64TargetMachine {
virtual void anchor();
public:
- AArch64leTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ AArch64leTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
@@ -65,7 +65,7 @@ public:
class AArch64beTargetMachine : public AArch64TargetMachine {
virtual void anchor();
public:
- AArch64beTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ AArch64beTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
diff --git a/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp b/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
index eb05ed915ddb..82bc949927ce 100644
--- a/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
+++ b/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
@@ -52,7 +52,7 @@ getVariant(uint64_t LLVMDisassembler_VariantKind) {
/// returns zero and isBranch is Success then a symbol look up for
/// Address + Value is done and if a symbol is found an MCExpr is created with
/// that, else an MCExpr with Address + Value is created. If GetOpInfo()
-/// returns zero and isBranch is Fail then the the Opcode of the MCInst is
+/// returns zero and isBranch is Fail then the Opcode of the MCInst is
/// tested, and for ADRP and other instructions that help to load pointers,
/// a symbol lookup is done to see if it returns a specific reference type
/// to add to the comment stream. This function returns Success if it adds
diff --git a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
index 96fbe3a9af4d..7f56c2cf6bb8 100644
--- a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
+++ b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
@@ -1358,7 +1358,7 @@ void AArch64InstPrinter::printSystemPStateField(const MCInst *MI, unsigned OpNo,
StringRef Name =
AArch64PState::PStateMapper().toString(Val, STI.getFeatureBits(), Valid);
if (Valid)
- O << StringRef(Name.str()).upper();
+ O << Name.upper();
else
O << "#" << Val;
}
diff --git a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
index 15dee978e229..19544ac600d6 100644
--- a/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
+++ b/lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h
@@ -181,6 +181,6 @@ public:
static const char *getRegisterName(unsigned RegNo,
unsigned AltIdx = AArch64::NoRegAltName);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index 6c15bf3afb2d..3e982ee03986 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -293,7 +293,7 @@ enum CompactUnwindEncodings {
UNWIND_AArch64_FRAME_D14_D15_PAIR = 0x00000800
};
-} // end CU namespace
+} // namespace CU
// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
@@ -517,14 +517,13 @@ void ELFAArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
}
AArch64AsmBackend::applyFixup (Fixup, Data, DataSize, Value, IsPCRel);
}
-}
+} // namespace
MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
- const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
- Triple TheTriple(TT);
-
- if (TheTriple.isOSDarwin())
+ const MCRegisterInfo &MRI,
+ const Triple &TheTriple,
+ StringRef CPU) {
+ if (TheTriple.isOSBinFormatMachO())
return new DarwinAArch64AsmBackend(T, MRI);
assert(TheTriple.isOSBinFormatELF() && "Expect either MachO or ELF target");
@@ -533,10 +532,9 @@ MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
}
MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
- const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
- Triple TheTriple(TT);
-
+ const MCRegisterInfo &MRI,
+ const Triple &TheTriple,
+ StringRef CPU) {
assert(TheTriple.isOSBinFormatELF() &&
"Big endian is only supported for ELF targets!");
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
index 1f516d1db896..807679fb1a21 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
@@ -34,7 +34,7 @@ protected:
private:
};
-}
+} // namespace
AArch64ELFObjectWriter::AArch64ELFObjectWriter(uint8_t OSABI,
bool IsLittleEndian)
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
index 78837de18b97..bbcbf514069c 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp
@@ -208,9 +208,9 @@ MCELFStreamer *createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB,
MCTargetStreamer *
createAArch64ObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) {
- Triple TT(STI.getTargetTriple());
+ const Triple &TT = STI.getTargetTriple();
if (TT.getObjectFormat() == Triple::ELF)
return new AArch64TargetELFStreamer(S);
return nullptr;
}
-}
+} // namespace llvm
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
index f89a85273872..099d1b01c339 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -41,7 +41,7 @@ static MCInstrInfo *createAArch64MCInstrInfo() {
}
static MCSubtargetInfo *
-createAArch64MCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS) {
+createAArch64MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
MCSubtargetInfo *X = new MCSubtargetInfo();
if (CPU.empty())
@@ -60,7 +60,7 @@ static MCRegisterInfo *createAArch64MCRegisterInfo(StringRef Triple) {
static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI,
const Triple &TheTriple) {
MCAsmInfo *MAI;
- if (TheTriple.isOSDarwin())
+ if (TheTriple.isOSBinFormatMachO())
MAI = new AArch64MCAsmInfoDarwin();
else {
assert(TheTriple.isOSBinFormatELF() && "Only expect Darwin or ELF");
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
index 4705bdf546ff..ca56f6393c41 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h
@@ -43,11 +43,11 @@ MCCodeEmitter *createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
MCContext &Ctx);
MCAsmBackend *createAArch64leAsmBackend(const Target &T,
- const MCRegisterInfo &MRI, StringRef TT,
- StringRef CPU);
+ const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU);
MCAsmBackend *createAArch64beAsmBackend(const Target &T,
- const MCRegisterInfo &MRI, StringRef TT,
- StringRef CPU);
+ const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU);
MCObjectWriter *createAArch64ELFObjectWriter(raw_pwrite_stream &OS,
uint8_t OSABI,
@@ -65,7 +65,7 @@ MCTargetStreamer *createAArch64AsmTargetStreamer(MCStreamer &S,
MCTargetStreamer *createAArch64ObjectTargetStreamer(MCStreamer &S,
const MCSubtargetInfo &STI);
-} // End llvm namespace
+} // namespace llvm
// Defines symbolic names for AArch64 registers. This defines a mapping from
// register name to register number.
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
index 67af810bbbec..b2f5bf3cf4b5 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
@@ -38,7 +38,7 @@ public:
const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) override;
};
-}
+} // namespace
bool AArch64MachObjectWriter::getAArch64FixupKindMachOInfo(
const MCFixup &Fixup, unsigned &RelocType, const MCSymbolRefExpr *Sym,
@@ -287,7 +287,7 @@ void AArch64MachObjectWriter::recordRelocation(
if (Symbol->isTemporary() && (Value || !CanUseLocalRelocation)) {
const MCSection &Sec = Symbol->getSection();
if (!Asm.getContext().getAsmInfo()->isSectionAtomizableBySymbols(Sec))
- Asm.addLocalUsedInReloc(*Symbol);
+ Symbol->setUsedInReloc();
}
const MCSymbol *Base = Asm.getAtom(*Symbol);
diff --git a/lib/Target/AArch64/Utils/AArch64BaseInfo.h b/lib/Target/AArch64/Utils/AArch64BaseInfo.h
index 7e42f8e3601e..40071f6b6bb7 100644
--- a/lib/Target/AArch64/Utils/AArch64BaseInfo.h
+++ b/lib/Target/AArch64/Utils/AArch64BaseInfo.h
@@ -346,7 +346,7 @@ namespace AArch64AT {
ATMapper();
};
-}
+} // namespace AArch64AT
namespace AArch64DB {
enum DBValues {
Invalid = -1,
@@ -369,7 +369,7 @@ namespace AArch64DB {
DBarrierMapper();
};
-}
+} // namespace AArch64DB
namespace AArch64DC {
enum DCValues {
@@ -390,7 +390,7 @@ namespace AArch64DC {
DCMapper();
};
-}
+} // namespace AArch64DC
namespace AArch64IC {
enum ICValues {
@@ -410,7 +410,7 @@ namespace AArch64IC {
static inline bool NeedsRegister(ICValues Val) {
return Val == IVAU;
}
-}
+} // namespace AArch64IC
namespace AArch64ISB {
enum ISBValues {
@@ -422,7 +422,7 @@ namespace AArch64ISB {
ISBMapper();
};
-}
+} // namespace AArch64ISB
namespace AArch64PRFM {
enum PRFMValues {
@@ -452,7 +452,7 @@ namespace AArch64PRFM {
PRFMMapper();
};
-}
+} // namespace AArch64PRFM
namespace AArch64PState {
enum PStateValues {
@@ -471,7 +471,7 @@ namespace AArch64PState {
PStateMapper();
};
-}
+} // namespace AArch64PState
namespace AArch64SE {
enum ShiftExtSpecifiers {
@@ -492,7 +492,7 @@ namespace AArch64SE {
SXTW,
SXTX
};
-}
+} // namespace AArch64SE
namespace AArch64Layout {
enum VectorLayout {
@@ -514,7 +514,7 @@ namespace AArch64Layout {
VL_S,
VL_D
};
-}
+} // namespace AArch64Layout
inline static const char *
AArch64VectorLayoutToString(AArch64Layout::VectorLayout Layout) {
@@ -1221,7 +1221,7 @@ namespace AArch64SysReg {
};
uint32_t ParseGenericRegister(StringRef Name, bool &Valid);
-}
+} // namespace AArch64SysReg
namespace AArch64TLBI {
enum TLBIValues {
@@ -1283,7 +1283,7 @@ namespace AArch64TLBI {
return true;
}
}
-}
+} // namespace AArch64TLBI
namespace AArch64II {
/// Target Operand Flag enum.
diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/AMDGPU/AMDGPU.h
index 0a05d25189b0..0a05d25189b0 100644
--- a/lib/Target/R600/AMDGPU.h
+++ b/lib/Target/AMDGPU/AMDGPU.h
diff --git a/lib/Target/R600/AMDGPU.td b/lib/Target/AMDGPU/AMDGPU.td
index 2e7e39a54d33..2e7e39a54d33 100644
--- a/lib/Target/R600/AMDGPU.td
+++ b/lib/Target/AMDGPU/AMDGPU.td
diff --git a/lib/Target/R600/AMDGPUAlwaysInlinePass.cpp b/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
index 0b426bc63dd5..0b426bc63dd5 100644
--- a/lib/Target/R600/AMDGPUAlwaysInlinePass.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
diff --git a/lib/Target/R600/AMDGPUAsmPrinter.cpp b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 56b50a9c159b..afc6bcb52bb8 100644
--- a/lib/Target/R600/AMDGPUAsmPrinter.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -80,7 +80,7 @@ createAMDGPUAsmPrinterPass(TargetMachine &tm,
return new AMDGPUAsmPrinter(tm, std::move(Streamer));
}
-extern "C" void LLVMInitializeR600AsmPrinter() {
+extern "C" void LLVMInitializeAMDGPUAsmPrinter() {
TargetRegistry::RegisterAsmPrinter(TheAMDGPUTarget, createAMDGPUAsmPrinterPass);
TargetRegistry::RegisterAsmPrinter(TheGCNTarget, createAMDGPUAsmPrinterPass);
}
@@ -338,8 +338,10 @@ void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
ProgInfo.NumSGPR = MaxSGPR + 1;
if (STM.hasSGPRInitBug()) {
- if (ProgInfo.NumSGPR > AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG)
- llvm_unreachable("Too many SGPRs used with the SGPR init bug");
+ if (ProgInfo.NumSGPR > AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG) {
+ LLVMContext &Ctx = MF.getFunction()->getContext();
+ Ctx.emitError("too many SGPRs used with the SGPR init bug");
+ }
ProgInfo.NumSGPR = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG;
}
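The hunk above swaps llvm_unreachable for a context diagnostic, so release builds report the condition instead of crashing, then clamp the SGPR count and continue. A sketch of the same pattern in isolation (assumes LLVM headers of this era; clampSGPRCount is a hypothetical helper, not part of the patch):

#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

// Report an over-limit register count through the LLVMContext's error
// path, then clamp so code generation can proceed deterministically.
static unsigned clampSGPRCount(const llvm::Function &F, unsigned NumSGPR,
                               unsigned Limit) {
  if (NumSGPR > Limit) {
    F.getContext().emitError("too many SGPRs used with the SGPR init bug");
    NumSGPR = Limit;
  }
  return NumSGPR;
}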
diff --git a/lib/Target/R600/AMDGPUAsmPrinter.h b/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
index 1acff3a3222f..92072512e6b5 100644
--- a/lib/Target/R600/AMDGPUAsmPrinter.h
+++ b/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
@@ -108,6 +108,6 @@ protected:
size_t DisasmLineMaxLen;
};
-} // End anonymous llvm
+} // namespace llvm
#endif
diff --git a/lib/Target/R600/AMDGPUCallingConv.td b/lib/Target/AMDGPU/AMDGPUCallingConv.td
index 6ffa7a083583..6ffa7a083583 100644
--- a/lib/Target/R600/AMDGPUCallingConv.td
+++ b/lib/Target/AMDGPU/AMDGPUCallingConv.td
diff --git a/lib/Target/R600/AMDGPUFrameLowering.cpp b/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
index 8175786fb9b1..8175786fb9b1 100644
--- a/lib/Target/R600/AMDGPUFrameLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
diff --git a/lib/Target/R600/AMDGPUFrameLowering.h b/lib/Target/AMDGPU/AMDGPUFrameLowering.h
index 9f31be1af794..9f31be1af794 100644
--- a/lib/Target/R600/AMDGPUFrameLowering.h
+++ b/lib/Target/AMDGPU/AMDGPUFrameLowering.h
diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index df4461eac4db..df4461eac4db 100644
--- a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d56838ec2019..570473d85585 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -68,7 +68,7 @@ public:
};
int DiagnosticInfoUnsupported::KindID = 0;
-}
+} // namespace
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
diff --git a/lib/Target/R600/AMDGPUISelLowering.h b/lib/Target/AMDGPU/AMDGPUISelLowering.h
index fbb7d3c88437..fbb7d3c88437 100644
--- a/lib/Target/R600/AMDGPUISelLowering.h
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.h
diff --git a/lib/Target/R600/AMDGPUInstrInfo.cpp b/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
index 64e295f1144c..15a3d543a68c 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
@@ -234,10 +234,9 @@ bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
// TODO: Implement this function
return false;
}
-bool
-AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2)
- const {
+
+bool AMDGPUInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+ ArrayRef<MachineOperand> Pred2) const {
// TODO: Implement this function
return false;
}
diff --git a/lib/Target/R600/AMDGPUInstrInfo.h b/lib/Target/AMDGPU/AMDGPUInstrInfo.h
index 8fd27a17638b..31ae9a3c7760 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.h
+++ b/lib/Target/AMDGPU/AMDGPUInstrInfo.h
@@ -125,8 +125,8 @@ public:
void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
bool isPredicated(const MachineInstr *MI) const override;
- bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const override;
+ bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+ ArrayRef<MachineOperand> Pred2) const override;
bool DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const override;
bool isPredicable(MachineInstr *MI) const override;
@@ -198,7 +198,7 @@ namespace AMDGPU {
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
} // End namespace AMDGPU
-} // End llvm namespace
+} // namespace llvm
#define AMDGPU_FLAG_REGISTER_LOAD (UINT64_C(1) << 63)
#define AMDGPU_FLAG_REGISTER_STORE (UINT64_C(1) << 62)
diff --git a/lib/Target/R600/AMDGPUInstrInfo.td b/lib/Target/AMDGPU/AMDGPUInstrInfo.td
index b413897d9d23..b413897d9d23 100644
--- a/lib/Target/R600/AMDGPUInstrInfo.td
+++ b/lib/Target/AMDGPU/AMDGPUInstrInfo.td
diff --git a/lib/Target/R600/AMDGPUInstructions.td b/lib/Target/AMDGPU/AMDGPUInstructions.td
index 72cab39277c6..72cab39277c6 100644
--- a/lib/Target/R600/AMDGPUInstructions.td
+++ b/lib/Target/AMDGPU/AMDGPUInstructions.td
diff --git a/lib/Target/R600/AMDGPUIntrinsicInfo.cpp b/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp
index e94bb6013d83..e94bb6013d83 100644
--- a/lib/Target/R600/AMDGPUIntrinsicInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp
diff --git a/lib/Target/R600/AMDGPUIntrinsicInfo.h b/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h
index 4c95b5ec0974..4c95b5ec0974 100644
--- a/lib/Target/R600/AMDGPUIntrinsicInfo.h
+++ b/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h
diff --git a/lib/Target/R600/AMDGPUIntrinsics.td b/lib/Target/AMDGPU/AMDGPUIntrinsics.td
index ab489cd2a4ab..ab489cd2a4ab 100644
--- a/lib/Target/R600/AMDGPUIntrinsics.td
+++ b/lib/Target/AMDGPU/AMDGPUIntrinsics.td
diff --git a/lib/Target/R600/AMDGPUMCInstLower.cpp b/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
index 20831460b933..20831460b933 100644
--- a/lib/Target/R600/AMDGPUMCInstLower.cpp
+++ b/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
diff --git a/lib/Target/R600/AMDGPUMCInstLower.h b/lib/Target/AMDGPU/AMDGPUMCInstLower.h
index d322fe072b2b..d322fe072b2b 100644
--- a/lib/Target/R600/AMDGPUMCInstLower.h
+++ b/lib/Target/AMDGPU/AMDGPUMCInstLower.h
diff --git a/lib/Target/R600/AMDGPUMachineFunction.cpp b/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
index 21c7da663234..21c7da663234 100644
--- a/lib/Target/R600/AMDGPUMachineFunction.cpp
+++ b/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
diff --git a/lib/Target/R600/AMDGPUMachineFunction.h b/lib/Target/AMDGPU/AMDGPUMachineFunction.h
index f5e4694e76f6..e17b41ad5f21 100644
--- a/lib/Target/R600/AMDGPUMachineFunction.h
+++ b/lib/Target/AMDGPU/AMDGPUMachineFunction.h
@@ -41,5 +41,5 @@ public:
bool IsKernel;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/R600/AMDGPUPromoteAlloca.cpp b/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 4a65bfc57f14..4a65bfc57f14 100644
--- a/lib/Target/R600/AMDGPUPromoteAlloca.cpp
+++ b/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
diff --git a/lib/Target/R600/AMDGPURegisterInfo.cpp b/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
index 3ca0eca3417f..3ca0eca3417f 100644
--- a/lib/Target/R600/AMDGPURegisterInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
diff --git a/lib/Target/R600/AMDGPURegisterInfo.h b/lib/Target/AMDGPU/AMDGPURegisterInfo.h
index cfd800bdc703..cfd800bdc703 100644
--- a/lib/Target/R600/AMDGPURegisterInfo.h
+++ b/lib/Target/AMDGPU/AMDGPURegisterInfo.h
diff --git a/lib/Target/R600/AMDGPURegisterInfo.td b/lib/Target/AMDGPU/AMDGPURegisterInfo.td
index 835a1464395c..835a1464395c 100644
--- a/lib/Target/R600/AMDGPURegisterInfo.td
+++ b/lib/Target/AMDGPU/AMDGPURegisterInfo.td
diff --git a/lib/Target/R600/AMDGPUSubtarget.cpp b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index 5288866ba665..605ccd0e1361 100644
--- a/lib/Target/R600/AMDGPUSubtarget.cpp
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -32,8 +32,8 @@ using namespace llvm;
#include "AMDGPUGenSubtargetInfo.inc"
AMDGPUSubtarget &
-AMDGPUSubtarget::initializeSubtargetDependencies(StringRef TT, StringRef GPU,
- StringRef FS) {
+AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
+ StringRef GPU, StringRef FS) {
// Determine default and user-specified characteristics
// On SI+, we want FP64 denormals to be on by default. FP32 denormals can be
// enabled, but some instructions do not respect them and they run at the
@@ -46,7 +46,7 @@ AMDGPUSubtarget::initializeSubtargetDependencies(StringRef TT, StringRef GPU,
SmallString<256> FullFS("+promote-alloca,+fp64-denormals,");
FullFS += FS;
- if (GPU == "" && Triple(TT).getArch() == Triple::amdgcn)
+ if (GPU == "" && TT.getArch() == Triple::amdgcn)
GPU = "SI";
ParseSubtargetFeatures(GPU, FullFS);
@@ -61,7 +61,7 @@ AMDGPUSubtarget::initializeSubtargetDependencies(StringRef TT, StringRef GPU,
return *this;
}
-AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef GPU, StringRef FS,
+AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
TargetMachine &TM)
: AMDGPUGenSubtargetInfo(TT, GPU, FS), DevName(GPU), Is64bit(false),
DumpCode(false), R600ALUInst(false), HasVertexCache(false),
@@ -70,9 +70,8 @@ AMDGPUSubtarget::AMDGPUSubtarget(StringRef TT, StringRef GPU, StringRef FS,
CaymanISA(false), FlatAddressSpace(false), EnableIRStructurizer(true),
EnablePromoteAlloca(false), EnableIfCvt(true), EnableLoadStoreOpt(false),
WavefrontSize(0), CFALUBug(false), LocalMemorySize(0),
- EnableVGPRSpilling(false), SGPRInitBug(false),
- IsGCN(false), GCN1Encoding(false), GCN3Encoding(false), CIInsts(false),
- LDSBankCount(0),
+ EnableVGPRSpilling(false), SGPRInitBug(false), IsGCN(false),
+ GCN1Encoding(false), GCN3Encoding(false), CIInsts(false), LDSBankCount(0),
FrameLowering(TargetFrameLowering::StackGrowsUp,
64 * 16, // Maximum stack alignment (long16)
0),
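
Threading const Triple& through the subtarget constructor, instead of a raw StringRef, lets the triple be parsed once and merely queried afterwards; the old code in the hunk above built a temporary Triple(TT) just to call getArch(). A small sketch of the queries involved (the triple string is an example value, not taken from the patch):

#include "llvm/ADT/Triple.h"
#include <cassert>
using namespace llvm;

int main() {
  // Parse once, query many times; a StringRef parameter would force
  // each caller to reparse the triple for every getArch() check.
  Triple TT("amdgcn--amdhsa"); // example triple
  assert(TT.getArch() == Triple::amdgcn);
  bool UseSIDefault = TT.getArch() == Triple::amdgcn; // mirrors the GPU == "" case
  return UseSIDefault ? 0 : 1;
}
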
diff --git a/lib/Target/R600/AMDGPUSubtarget.h b/lib/Target/AMDGPU/AMDGPUSubtarget.h
index a5a901c739d4..0d40d14f8203 100644
--- a/lib/Target/R600/AMDGPUSubtarget.h
+++ b/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -85,9 +85,10 @@ private:
Triple TargetTriple;
public:
- AMDGPUSubtarget(StringRef TT, StringRef CPU, StringRef FS, TargetMachine &TM);
- AMDGPUSubtarget &initializeSubtargetDependencies(StringRef TT, StringRef GPU,
- StringRef FS);
+ AMDGPUSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
+ TargetMachine &TM);
+ AMDGPUSubtarget &initializeSubtargetDependencies(const Triple &TT,
+ StringRef GPU, StringRef FS);
const AMDGPUFrameLowering *getFrameLowering() const override {
return &FrameLowering;
diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 44c2abd294f7..a9a911a8efed 100644
--- a/lib/Target/R600/AMDGPUTargetMachine.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -37,7 +37,7 @@
using namespace llvm;
-extern "C" void LLVMInitializeR600Target() {
+extern "C" void LLVMInitializeAMDGPUTarget() {
// Register the target
RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);
@@ -51,11 +51,10 @@ static MachineSchedRegistry
SchedCustomRegistry("r600", "Run R600's custom scheduler",
createR600MachineScheduler);
-static std::string computeDataLayout(StringRef TT) {
- Triple Triple(TT);
+static std::string computeDataLayout(const Triple &TT) {
std::string Ret = "e-p:32:32";
- if (Triple.getArch() == Triple::amdgcn) {
+ if (TT.getArch() == Triple::amdgcn) {
// 32-bit private, local, and region pointers. 64-bit global and constant.
Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
}
@@ -66,7 +65,7 @@ static std::string computeDataLayout(StringRef TT) {
return Ret;
}
-AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
+AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
TargetOptions Options, Reloc::Model RM,
CodeModel::Model CM,
@@ -87,20 +86,21 @@ AMDGPUTargetMachine::~AMDGPUTargetMachine() {
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//
-R600TargetMachine::R600TargetMachine(const Target &T, StringRef TT, StringRef FS,
- StringRef CPU, TargetOptions Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL) :
- AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) { }
-
+R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
+ StringRef FS, StringRef CPU,
+ TargetOptions Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL)
+ : AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) {}
//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//
-GCNTargetMachine::GCNTargetMachine(const Target &T, StringRef TT, StringRef FS,
- StringRef CPU, TargetOptions Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL) :
- AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) { }
+GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
+ StringRef FS, StringRef CPU,
+ TargetOptions Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL)
+ : AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) {}
//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
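
The string computeDataLayout assembles above drives every pointer-size query in the backend; per the hunk's comment, address spaces 1/2/4 (global, constant) are 64-bit on amdgcn while 3 and 5 (local, private) stay 32-bit. A hedged sketch of how that layout string answers queries (trailing components of the real string omitted for brevity):

#include "llvm/IR/DataLayout.h"
#include <cassert>
using namespace llvm;

int main() {
  // Prefix of the amdgcn layout string built by computeDataLayout above.
  DataLayout DL("e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32");
  assert(DL.getPointerSizeInBits(1) == 64); // global pointers are 64-bit
  assert(DL.getPointerSizeInBits(3) == 32); // local (LDS) pointers are 32-bit
  assert(DL.getPointerSizeInBits(0) == 32); // default address space
  return 0;
}
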
diff --git a/lib/Target/R600/AMDGPUTargetMachine.h b/lib/Target/AMDGPU/AMDGPUTargetMachine.h
index 785c119a1028..14792e347a7a 100644
--- a/lib/Target/R600/AMDGPUTargetMachine.h
+++ b/lib/Target/AMDGPU/AMDGPUTargetMachine.h
@@ -37,7 +37,7 @@ protected:
AMDGPUIntrinsicInfo IntrinsicInfo;
public:
- AMDGPUTargetMachine(const Target &T, StringRef TT, StringRef FS,
+ AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef FS,
StringRef CPU, TargetOptions Options, Reloc::Model RM,
CodeModel::Model CM, CodeGenOpt::Level OL);
~AMDGPUTargetMachine();
@@ -63,7 +63,7 @@ public:
class R600TargetMachine : public AMDGPUTargetMachine {
public:
- R600TargetMachine(const Target &T, StringRef TT, StringRef FS,
+ R600TargetMachine(const Target &T, const Triple &TT, StringRef FS,
StringRef CPU, TargetOptions Options, Reloc::Model RM,
CodeModel::Model CM, CodeGenOpt::Level OL);
@@ -77,9 +77,9 @@ public:
class GCNTargetMachine : public AMDGPUTargetMachine {
public:
- GCNTargetMachine(const Target &T, StringRef TT, StringRef FS,
- StringRef CPU, TargetOptions Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL);
+ GCNTargetMachine(const Target &T, const Triple &TT, StringRef FS,
+ StringRef CPU, TargetOptions Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL);
TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
};
diff --git a/lib/Target/R600/AMDGPUTargetTransformInfo.cpp b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 6dacc742b129..6dacc742b129 100644
--- a/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
+++ b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
diff --git a/lib/Target/R600/AMDGPUTargetTransformInfo.h b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
index 791c84e6f28b..791c84e6f28b 100644
--- a/lib/Target/R600/AMDGPUTargetTransformInfo.h
+++ b/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
diff --git a/lib/Target/R600/AMDILCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
index c9b25a1a0b84..c9b25a1a0b84 100644
--- a/lib/Target/R600/AMDILCFGStructurizer.cpp
+++ b/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
diff --git a/lib/Target/R600/AMDKernelCodeT.h b/lib/Target/AMDGPU/AMDKernelCodeT.h
index 4d3041ff3db8..eaffb854793c 100644
--- a/lib/Target/R600/AMDKernelCodeT.h
+++ b/lib/Target/AMDGPU/AMDKernelCodeT.h
@@ -132,7 +132,7 @@ enum amd_code_property_mask_t {
/// private memory do not exceed this size. For example, if the
/// element size is 4 (32-bits or dword) and a 64-bit value must be
/// loaded, the finalizer will generate two 32-bit loads. This
- /// ensures that the interleaving will get the the work-item
+ /// ensures that the interleaving will get the work-item
/// specific dword for both halves of the 64-bit value. If it just
/// did a 64-bit load then it would get one dword which belonged to
/// its own work-item, but the second dword would belong to the
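
The doc comment in this hunk describes dword-interleaved private memory: with a 4-byte element size, consecutive dwords of one work-item are strided by the wavefront, so a 64-bit value must be fetched as two 32-bit loads. A toy model of that addressing; the swizzle formula is an illustration of the scheme described, not lifted from the finalizer:

#include <cstdint>
#include <cstdio>

// Byte offset of element `elem` of work-item `lane`, with 4-byte elements
// interleaved across a wavefront of `WaveSize` lanes (illustrative only).
static uint64_t privateOffset(unsigned lane, unsigned elem, unsigned WaveSize) {
  return (uint64_t)elem * WaveSize * 4 + lane * 4;
}

int main() {
  const unsigned WaveSize = 64;
  unsigned lane = 5;
  // A 64-bit value spans elements 0 and 1, hence two 32-bit loads whose
  // addresses are a full wavefront stride (64 * 4 = 256 bytes) apart.
  uint64_t lo = privateOffset(lane, 0, WaveSize); // first dword
  uint64_t hi = privateOffset(lane, 1, WaveSize); // second dword, 256B away
  printf("lo=%llu hi=%llu\n", (unsigned long long)lo, (unsigned long long)hi);
  return 0;
}
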
diff --git a/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp b/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 95025a6e29f1..80081d40d089 100644
--- a/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp
+++ b/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -376,6 +376,10 @@ public:
OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
+ OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
+ OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
+ void cvtFlat(MCInst &Inst, const OperandVector &Operands);
+
void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
OperandMatchResultTy parseOffset(OperandVector &Operands);
OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
@@ -399,7 +403,7 @@ struct OptionalOperand {
bool (*ConvertResult)(int64_t&);
};
-}
+} // namespace
static unsigned getRegClass(bool IsVgpr, unsigned RegWidth) {
if (IsVgpr) {
@@ -1092,6 +1096,67 @@ AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
}
//===----------------------------------------------------------------------===//
+// flat
+//===----------------------------------------------------------------------===//
+
+static const OptionalOperand FlatOptionalOps [] = {
+ {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
+ {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
+ {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
+};
+
+static const OptionalOperand FlatAtomicOptionalOps [] = {
+ {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
+ {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
+};
+
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
+ return parseOptionalOps(FlatOptionalOps, Operands);
+}
+
+AMDGPUAsmParser::OperandMatchResultTy
+AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
+ return parseOptionalOps(FlatAtomicOptionalOps, Operands);
+}
+
+void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
+ const OperandVector &Operands) {
+ std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+
+ for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
+ AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
+
+ // Add the register arguments
+ if (Op.isReg()) {
+ Op.addRegOperands(Inst, 1);
+ continue;
+ }
+
+ // Handle 'glc' token which is sometimes hard-coded into the
+ // asm string. There are no MCInst operands for these.
+ if (Op.isToken())
+ continue;
+
+ // Handle optional arguments
+ OptionalIdx[Op.getImmTy()] = i;
+
+ }
+
+ // flat atomic instructions don't have a glc argument.
+ if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
+ unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
+ ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
+ }
+
+ unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
+ unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
+
+ ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
+ ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
+}
+
+//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//
@@ -1304,7 +1369,7 @@ void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
}
/// Force static initialization.
-extern "C" void LLVMInitializeR600AsmParser() {
+extern "C" void LLVMInitializeAMDGPUAsmParser() {
RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}
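
cvtFlat above records each parsed optional operand's position in OptionalIdx, keyed by immediate type, then appends the operands to the MCInst in the fixed glc/slc/tfe order the encoding expects. A distilled, LLVM-free sketch of that table-driven pattern (all names are illustrative):

#include <cstdio>
#include <map>
#include <vector>

enum ImmTy { GLC, SLC, TFE };
struct Operand { ImmTy Ty; int Imm; };

// Mirror of cvtFlat: collect operands by type, then emit in a fixed order,
// defaulting any modifier the user did not write to 0.
static std::vector<int> convert(const std::vector<Operand> &Parsed) {
  std::map<ImmTy, int> Idx;
  for (size_t i = 0; i != Parsed.size(); ++i)
    Idx[Parsed[i].Ty] = (int)i;
  std::vector<int> Inst;
  for (ImmTy T : {GLC, SLC, TFE})
    Inst.push_back(Idx.count(T) ? Parsed[Idx[T]].Imm : 0);
  return Inst;
}

int main() {
  // "flat_load_dword v0, v[1:2] slc" -> glc=0, slc=1, tfe=0
  std::vector<int> I = convert({{SLC, 1}});
  printf("glc=%d slc=%d tfe=%d\n", I[0], I[1], I[2]);
  return 0;
}
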
diff --git a/lib/Target/AMDGPU/AsmParser/CMakeLists.txt b/lib/Target/AMDGPU/AsmParser/CMakeLists.txt
new file mode 100644
index 000000000000..21ddc4eb83d2
--- /dev/null
+++ b/lib/Target/AMDGPU/AsmParser/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMAMDGPUAsmParser
+ AMDGPUAsmParser.cpp
+ )
diff --git a/lib/Target/AMDGPU/AsmParser/LLVMBuild.txt b/lib/Target/AMDGPU/AsmParser/LLVMBuild.txt
new file mode 100644
index 000000000000..63d44d1e06f1
--- /dev/null
+++ b/lib/Target/AMDGPU/AsmParser/LLVMBuild.txt
@@ -0,0 +1,23 @@
+;===- ./lib/Target/AMDGPU/AsmParser/LLVMBuild.txt -------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = AMDGPUAsmParser
+parent = AMDGPU
+required_libraries = MC MCParser AMDGPUDesc AMDGPUInfo Support
+add_to_library_groups = AMDGPU
diff --git a/lib/Target/R600/AsmParser/Makefile b/lib/Target/AMDGPU/AsmParser/Makefile
index e6689b54b6ba..5ad219028036 100644
--- a/lib/Target/R600/AsmParser/Makefile
+++ b/lib/Target/AMDGPU/AsmParser/Makefile
@@ -1,4 +1,4 @@
-##===- lib/Target/R600/AsmParser/Makefile ----------------*- Makefile -*-===##
+##===- lib/Target/AMDGPU/AsmParser/Makefile ----------------*- Makefile -*-===##
#
# The LLVM Compiler Infrastructure
#
@@ -7,9 +7,9 @@
#
##===----------------------------------------------------------------------===##
LEVEL = ../../../..
-LIBRARYNAME = LLVMR600AsmParser
+LIBRARYNAME = LLVMAMDGPUAsmParser
-# Hack: we need to include 'main' R600 target directory to grab private headers
+# Hack: we need to include 'main' AMDGPU target directory to grab private headers
CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
include $(LEVEL)/Makefile.common
diff --git a/lib/Target/AMDGPU/CIInstructions.td b/lib/Target/AMDGPU/CIInstructions.td
new file mode 100644
index 000000000000..2f5fdbe92078
--- /dev/null
+++ b/lib/Target/AMDGPU/CIInstructions.td
@@ -0,0 +1,149 @@
+//===-- CIInstructions.td - CI Instruction Definitions --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Instruction definitions for CI and newer.
+//===----------------------------------------------------------------------===//
+
+
+def isCIVI : Predicate <
+ "Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS || "
+ "Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS"
+>, AssemblerPredicate<"FeatureCIInsts">;
+
+def HasFlatAddressSpace : Predicate<"Subtarget->hasFlatAddressSpace()">;
+
+//===----------------------------------------------------------------------===//
+// VOP1 Instructions
+//===----------------------------------------------------------------------===//
+
+let SubtargetPredicate = isCIVI in {
+
+defm V_TRUNC_F64 : VOP1Inst <vop1<0x17>, "v_trunc_f64",
+ VOP_F64_F64, ftrunc
+>;
+defm V_CEIL_F64 : VOP1Inst <vop1<0x18>, "v_ceil_f64",
+ VOP_F64_F64, fceil
+>;
+defm V_FLOOR_F64 : VOP1Inst <vop1<0x1A>, "v_floor_f64",
+ VOP_F64_F64, ffloor
+>;
+defm V_RNDNE_F64 : VOP1Inst <vop1<0x19>, "v_rndne_f64",
+ VOP_F64_F64, frint
+>;
+defm V_LOG_LEGACY_F32 : VOP1Inst <vop1<0x45, 0x4c>, "v_log_legacy_f32",
+ VOP_F32_F32
+>;
+defm V_EXP_LEGACY_F32 : VOP1Inst <vop1<0x46, 0x4b>, "v_exp_legacy_f32",
+ VOP_F32_F32
+>;
+
+//===----------------------------------------------------------------------===//
+// Flat Instructions
+//===----------------------------------------------------------------------===//
+
+def FLAT_LOAD_UBYTE : FLAT_Load_Helper <0x8, "flat_load_ubyte", VGPR_32>;
+def FLAT_LOAD_SBYTE : FLAT_Load_Helper <0x9, "flat_load_sbyte", VGPR_32>;
+def FLAT_LOAD_USHORT : FLAT_Load_Helper <0xa, "flat_load_ushort", VGPR_32>;
+def FLAT_LOAD_SSHORT : FLAT_Load_Helper <0xb, "flat_load_sshort", VGPR_32>;
+def FLAT_LOAD_DWORD : FLAT_Load_Helper <0xc, "flat_load_dword", VGPR_32>;
+def FLAT_LOAD_DWORDX2 : FLAT_Load_Helper <0xd, "flat_load_dwordx2", VReg_64>;
+def FLAT_LOAD_DWORDX4 : FLAT_Load_Helper <0xe, "flat_load_dwordx4", VReg_128>;
+def FLAT_LOAD_DWORDX3 : FLAT_Load_Helper <0xf, "flat_load_dwordx3", VReg_96>;
+def FLAT_STORE_BYTE : FLAT_Store_Helper <0x18, "flat_store_byte", VGPR_32>;
+def FLAT_STORE_SHORT : FLAT_Store_Helper <0x1a, "flat_store_short", VGPR_32>;
+def FLAT_STORE_DWORD : FLAT_Store_Helper <0x1c, "flat_store_dword", VGPR_32>;
+def FLAT_STORE_DWORDX2 : FLAT_Store_Helper <
+ 0x1d, "flat_store_dwordx2", VReg_64
+>;
+def FLAT_STORE_DWORDX4 : FLAT_Store_Helper <
+ 0x1e, "flat_store_dwordx4", VReg_128
+>;
+def FLAT_STORE_DWORDX3 : FLAT_Store_Helper <
+ 0x1f, "flat_store_dwordx3", VReg_96
+>;
+defm FLAT_ATOMIC_SWAP : FLAT_ATOMIC <0x30, "flat_atomic_swap", VGPR_32>;
+defm FLAT_ATOMIC_CMPSWAP : FLAT_ATOMIC <
+ 0x31, "flat_atomic_cmpswap", VGPR_32, VReg_64
+>;
+defm FLAT_ATOMIC_ADD : FLAT_ATOMIC <0x32, "flat_atomic_add", VGPR_32>;
+defm FLAT_ATOMIC_SUB : FLAT_ATOMIC <0x33, "flat_atomic_sub", VGPR_32>;
+defm FLAT_ATOMIC_RSUB : FLAT_ATOMIC <0x34, "flat_atomic_rsub", VGPR_32>;
+defm FLAT_ATOMIC_SMIN : FLAT_ATOMIC <0x35, "flat_atomic_smin", VGPR_32>;
+defm FLAT_ATOMIC_UMIN : FLAT_ATOMIC <0x36, "flat_atomic_umin", VGPR_32>;
+defm FLAT_ATOMIC_SMAX : FLAT_ATOMIC <0x37, "flat_atomic_smax", VGPR_32>;
+defm FLAT_ATOMIC_UMAX : FLAT_ATOMIC <0x38, "flat_atomic_umax", VGPR_32>;
+defm FLAT_ATOMIC_AND : FLAT_ATOMIC <0x39, "flat_atomic_and", VGPR_32>;
+defm FLAT_ATOMIC_OR : FLAT_ATOMIC <0x3a, "flat_atomic_or", VGPR_32>;
+defm FLAT_ATOMIC_XOR : FLAT_ATOMIC <0x3b, "flat_atomic_xor", VGPR_32>;
+defm FLAT_ATOMIC_INC : FLAT_ATOMIC <0x3c, "flat_atomic_inc", VGPR_32>;
+defm FLAT_ATOMIC_DEC : FLAT_ATOMIC <0x3d, "flat_atomic_dec", VGPR_32>;
+defm FLAT_ATOMIC_FCMPSWAP : FLAT_ATOMIC <
+ 0x3e, "flat_atomic_fcmpswap", VGPR_32, VReg_64
+>;
+defm FLAT_ATOMIC_FMIN : FLAT_ATOMIC <0x3f, "flat_atomic_fmin", VGPR_32>;
+defm FLAT_ATOMIC_FMAX : FLAT_ATOMIC <0x40, "flat_atomic_fmax", VGPR_32>;
+defm FLAT_ATOMIC_SWAP_X2 : FLAT_ATOMIC <0x50, "flat_atomic_swap_x2", VReg_64>;
+defm FLAT_ATOMIC_CMPSWAP_X2 : FLAT_ATOMIC <
+ 0x51, "flat_atomic_cmpswap_x2", VReg_64, VReg_128
+>;
+defm FLAT_ATOMIC_ADD_X2 : FLAT_ATOMIC <0x52, "flat_atomic_add_x2", VReg_64>;
+defm FLAT_ATOMIC_SUB_X2 : FLAT_ATOMIC <0x53, "flat_atomic_sub_x2", VReg_64>;
+defm FLAT_ATOMIC_RSUB_X2 : FLAT_ATOMIC <0x54, "flat_atomic_rsub_x2", VReg_64>;
+defm FLAT_ATOMIC_SMIN_X2 : FLAT_ATOMIC <0x55, "flat_atomic_smin_x2", VReg_64>;
+defm FLAT_ATOMIC_UMIN_X2 : FLAT_ATOMIC <0x56, "flat_atomic_umin_x2", VReg_64>;
+defm FLAT_ATOMIC_SMAX_X2 : FLAT_ATOMIC <0x57, "flat_atomic_smax_x2", VReg_64>;
+defm FLAT_ATOMIC_UMAX_X2 : FLAT_ATOMIC <0x58, "flat_atomic_umax_x2", VReg_64>;
+defm FLAT_ATOMIC_AND_X2 : FLAT_ATOMIC <0x59, "flat_atomic_and_x2", VReg_64>;
+defm FLAT_ATOMIC_OR_X2 : FLAT_ATOMIC <0x5a, "flat_atomic_or_x2", VReg_64>;
+defm FLAT_ATOMIC_XOR_X2 : FLAT_ATOMIC <0x5b, "flat_atomic_xor_x2", VReg_64>;
+defm FLAT_ATOMIC_INC_X2 : FLAT_ATOMIC <0x5c, "flat_atomic_inc_x2", VReg_64>;
+defm FLAT_ATOMIC_DEC_X2 : FLAT_ATOMIC <0x5d, "flat_atomic_dec_x2", VReg_64>;
+defm FLAT_ATOMIC_FCMPSWAP_X2 : FLAT_ATOMIC <
+ 0x5e, "flat_atomic_fcmpswap_x2", VReg_64, VReg_128
+>;
+defm FLAT_ATOMIC_FMIN_X2 : FLAT_ATOMIC <0x5f, "flat_atomic_fmin_x2", VReg_64>;
+defm FLAT_ATOMIC_FMAX_X2 : FLAT_ATOMIC <0x60, "flat_atomic_fmax_x2", VReg_64>;
+
+} // End SubtargetPredicate = isCIVI
+
+//===----------------------------------------------------------------------===//
+// Flat Patterns
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasFlatAddressSpace] in {
+
+class FLATLoad_Pattern <FLAT Instr_ADDR64, ValueType vt,
+ PatFrag flat_ld> :
+ Pat <(vt (flat_ld i64:$ptr)),
+ (Instr_ADDR64 $ptr, 0, 0, 0)
+>;
+
+def : FLATLoad_Pattern <FLAT_LOAD_SBYTE, i32, sextloadi8_flat>;
+def : FLATLoad_Pattern <FLAT_LOAD_UBYTE, i32, az_extloadi8_flat>;
+def : FLATLoad_Pattern <FLAT_LOAD_SSHORT, i32, sextloadi16_flat>;
+def : FLATLoad_Pattern <FLAT_LOAD_USHORT, i32, az_extloadi16_flat>;
+def : FLATLoad_Pattern <FLAT_LOAD_DWORD, i32, flat_load>;
+def : FLATLoad_Pattern <FLAT_LOAD_DWORDX2, i64, flat_load>;
+def : FLATLoad_Pattern <FLAT_LOAD_DWORDX2, i64, az_extloadi32_flat>;
+def : FLATLoad_Pattern <FLAT_LOAD_DWORDX2, v2i32, flat_load>;
+def : FLATLoad_Pattern <FLAT_LOAD_DWORDX4, v4i32, flat_load>;
+
+class FLATStore_Pattern <FLAT Instr, ValueType vt, PatFrag st> :
+ Pat <(st vt:$value, i64:$ptr),
+ (Instr $value, $ptr, 0, 0, 0)
+ >;
+
+def : FLATStore_Pattern <FLAT_STORE_BYTE, i32, truncstorei8_flat>;
+def : FLATStore_Pattern <FLAT_STORE_SHORT, i32, truncstorei16_flat>;
+def : FLATStore_Pattern <FLAT_STORE_DWORD, i32, flat_store>;
+def : FLATStore_Pattern <FLAT_STORE_DWORDX2, i64, flat_store>;
+def : FLATStore_Pattern <FLAT_STORE_DWORDX2, v2i32, flat_store>;
+def : FLATStore_Pattern <FLAT_STORE_DWORDX4, v4i32, flat_store>;
+
+} // End HasFlatAddressSpace predicate
+
diff --git a/lib/Target/R600/CMakeLists.txt b/lib/Target/AMDGPU/CMakeLists.txt
index 3c1bc49f2823..3e5ff1f3c6d4 100644
--- a/lib/Target/R600/CMakeLists.txt
+++ b/lib/Target/AMDGPU/CMakeLists.txt
@@ -12,7 +12,7 @@ tablegen(LLVM AMDGPUGenAsmWriter.inc -gen-asm-writer)
tablegen(LLVM AMDGPUGenAsmMatcher.inc -gen-asm-matcher)
add_public_tablegen_target(AMDGPUCommonTableGen)
-add_llvm_target(R600CodeGen
+add_llvm_target(AMDGPUCodeGen
AMDILCFGStructurizer.cpp
AMDGPUAlwaysInlinePass.cpp
AMDGPUAsmPrinter.cpp
diff --git a/lib/Target/R600/CaymanInstructions.td b/lib/Target/AMDGPU/CaymanInstructions.td
index ba4df82a6d37..ba4df82a6d37 100644
--- a/lib/Target/R600/CaymanInstructions.td
+++ b/lib/Target/AMDGPU/CaymanInstructions.td
diff --git a/lib/Target/R600/EvergreenInstructions.td b/lib/Target/AMDGPU/EvergreenInstructions.td
index 7adcd46fe196..7adcd46fe196 100644
--- a/lib/Target/R600/EvergreenInstructions.td
+++ b/lib/Target/AMDGPU/EvergreenInstructions.td
diff --git a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp b/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp
index f70676943bb3..e811d5cff221 100644
--- a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
+++ b/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp
@@ -424,7 +424,7 @@ void AMDGPUInstPrinter::printLiteral(const MCInst *MI, unsigned OpNo,
void AMDGPUInstPrinter::printLast(const MCInst *MI, unsigned OpNo,
raw_ostream &O) {
- printIfSet(MI, OpNo, O.indent(25 - O.GetNumBytesInBuffer()), "*", " ");
+ printIfSet(MI, OpNo, O, "*", " ");
}
void AMDGPUInstPrinter::printNeg(const MCInst *MI, unsigned OpNo,
diff --git a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h b/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h
index 14fb511e9232..14fb511e9232 100644
--- a/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
+++ b/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h
diff --git a/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt b/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt
new file mode 100644
index 000000000000..ce63bd553b9c
--- /dev/null
+++ b/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMAMDGPUAsmPrinter
+ AMDGPUInstPrinter.cpp
+ )
diff --git a/lib/Target/R600/InstPrinter/LLVMBuild.txt b/lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt
index ec0be89f104c..fdb43844dc63 100644
--- a/lib/Target/R600/InstPrinter/LLVMBuild.txt
+++ b/lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt
@@ -1,4 +1,4 @@
-;===- ./lib/Target/R600/InstPrinter/LLVMBuild.txt -----------*- Conf -*--===;
+;===- ./lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt -----------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
@@ -17,8 +17,8 @@
[component_0]
type = Library
-name = R600AsmPrinter
-parent = R600
+name = AMDGPUAsmPrinter
+parent = AMDGPU
required_libraries = MC Support
-add_to_library_groups = R600
+add_to_library_groups = AMDGPU
diff --git a/lib/Target/R600/InstPrinter/Makefile b/lib/Target/AMDGPU/InstPrinter/Makefile
index a794cc1124ed..4e48ac7e28a9 100644
--- a/lib/Target/R600/InstPrinter/Makefile
+++ b/lib/Target/AMDGPU/InstPrinter/Makefile
@@ -7,7 +7,7 @@
#
##===----------------------------------------------------------------------===##
LEVEL = ../../../..
-LIBRARYNAME = LLVMR600AsmPrinter
+LIBRARYNAME = LLVMAMDGPUAsmPrinter
# Hack: we need to include 'main' x86 target directory to grab private headers
CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
diff --git a/lib/Target/R600/LLVMBuild.txt b/lib/Target/AMDGPU/LLVMBuild.txt
index f3f254fdcbad..c6861df91ed6 100644
--- a/lib/Target/R600/LLVMBuild.txt
+++ b/lib/Target/AMDGPU/LLVMBuild.txt
@@ -20,14 +20,14 @@ subdirectories = AsmParser InstPrinter MCTargetDesc TargetInfo
[component_0]
type = TargetGroup
-name = R600
+name = AMDGPU
parent = Target
has_asmparser = 1
has_asmprinter = 1
[component_1]
type = Library
-name = R600CodeGen
-parent = R600
-required_libraries = Analysis AsmPrinter CodeGen Core IPO MC R600AsmParser R600AsmPrinter R600Desc R600Info Scalar SelectionDAG Support Target TransformUtils
-add_to_library_groups = R600
+name = AMDGPUCodeGen
+parent = AMDGPU
+required_libraries = Analysis AsmPrinter CodeGen Core IPO MC AMDGPUAsmParser AMDGPUAsmPrinter AMDGPUDesc AMDGPUInfo Scalar SelectionDAG Support Target TransformUtils
+add_to_library_groups = AMDGPU
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
index 3713223697ed..8bed2deef4cd 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
@@ -139,7 +139,6 @@ public:
MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT,
- StringRef CPU) {
+ const Triple &TT, StringRef CPU) {
return new ELFAMDGPUAsmBackend(T);
}
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp
index 59f45ff02d88..59f45ff02d88 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h
index 01021d67ffd9..fa3b3c3d9489 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h
@@ -28,7 +28,7 @@ enum Fixups {
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
};
-}
-}
+} // namespace AMDGPU
+} // namespace llvm
#endif
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
index 028a86dfc7ad..028a86dfc7ad 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
index a5bac51e356f..a5bac51e356f 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
index 521b3b39bba2..521b3b39bba2 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h
index c95742762233..c95742762233 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
index 1bc205d36fa1..a7d3dd1345f9 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
@@ -49,8 +49,8 @@ static MCRegisterInfo *createAMDGPUMCRegisterInfo(StringRef TT) {
return X;
}
-static MCSubtargetInfo *createAMDGPUMCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
+static MCSubtargetInfo *
+createAMDGPUMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
MCSubtargetInfo * X = new MCSubtargetInfo();
InitAMDGPUMCSubtargetInfo(X, TT, CPU, FS);
return X;
@@ -72,7 +72,7 @@ static MCInstPrinter *createAMDGPUMCInstPrinter(const Triple &T,
return new AMDGPUInstPrinter(MAI, MII, MRI);
}
-extern "C" void LLVMInitializeR600TargetMC() {
+extern "C" void LLVMInitializeAMDGPUTargetMC() {
for (Target *T : {&TheAMDGPUTarget, &TheGCNTarget}) {
RegisterMCAsmInfo<AMDGPUMCAsmInfo> X(*T);
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
index 9a7548e9fbf8..ac611b862a1a 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
+++ b/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
@@ -28,6 +28,7 @@ class MCObjectWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
class Target;
+class Triple;
class raw_pwrite_stream;
class raw_ostream;
@@ -43,10 +44,10 @@ MCCodeEmitter *createSIMCCodeEmitter(const MCInstrInfo &MCII,
MCContext &Ctx);
MCAsmBackend *createAMDGPUAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+ const Triple &TT, StringRef CPU);
MCObjectWriter *createAMDGPUELFObjectWriter(raw_pwrite_stream &OS);
-} // End llvm namespace
+} // namespace llvm
#define GET_REGINFO_ENUM
#include "AMDGPUGenRegisterInfo.inc"
diff --git a/lib/Target/R600/MCTargetDesc/CMakeLists.txt b/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt
index 801c9054937d..151d0d5f83de 100644
--- a/lib/Target/R600/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt
@@ -1,5 +1,5 @@
-add_llvm_library(LLVMR600Desc
+add_llvm_library(LLVMAMDGPUDesc
AMDGPUAsmBackend.cpp
AMDGPUELFObjectWriter.cpp
AMDGPUMCCodeEmitter.cpp
diff --git a/lib/Target/R600/AsmParser/LLVMBuild.txt b/lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt
index 940e4cee6dfd..4217bb362975 100644
--- a/lib/Target/R600/AsmParser/LLVMBuild.txt
+++ b/lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt
@@ -1,4 +1,4 @@
-;===- ./lib/Target/R600/AsmParser/LLVMBuild.txt -------------*- Conf -*--===;
+;===- ./lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt -------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
@@ -17,7 +17,7 @@
[component_0]
type = Library
-name = R600AsmParser
-parent = R600
-required_libraries = MC MCParser R600Desc R600Info Support
-add_to_library_groups = R600
+name = AMDGPUDesc
+parent = AMDGPU
+required_libraries = MC AMDGPUAsmPrinter AMDGPUInfo Support
+add_to_library_groups = AMDGPU
diff --git a/lib/Target/R600/MCTargetDesc/Makefile b/lib/Target/AMDGPU/MCTargetDesc/Makefile
index 8894a7607f4f..5ad68662d98c 100644
--- a/lib/Target/R600/MCTargetDesc/Makefile
+++ b/lib/Target/AMDGPU/MCTargetDesc/Makefile
@@ -8,7 +8,7 @@
##===----------------------------------------------------------------------===##
LEVEL = ../../../..
-LIBRARYNAME = LLVMR600Desc
+LIBRARYNAME = LLVMAMDGPUDesc
# Hack: we need to include 'main' target directory to grab private headers
CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
diff --git a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp b/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
index e683498d52a5..e683498d52a5 100644
--- a/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
diff --git a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp b/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
index 65a0eeba2b16..65a0eeba2b16 100644
--- a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
+++ b/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
diff --git a/lib/Target/R600/Makefile b/lib/Target/AMDGPU/Makefile
index 64a7c8c045c5..2e2de5020867 100644
--- a/lib/Target/R600/Makefile
+++ b/lib/Target/AMDGPU/Makefile
@@ -8,7 +8,7 @@
##===----------------------------------------------------------------------===##
LEVEL = ../../..
-LIBRARYNAME = LLVMR600CodeGen
+LIBRARYNAME = LLVMAMDGPUCodeGen
TARGET = AMDGPU
# Make sure that tblgen is run, first thing.
diff --git a/lib/Target/R600/Processors.td b/lib/Target/AMDGPU/Processors.td
index c0ffede51999..c0ffede51999 100644
--- a/lib/Target/R600/Processors.td
+++ b/lib/Target/AMDGPU/Processors.td
diff --git a/lib/Target/R600/R600ClauseMergePass.cpp b/lib/Target/AMDGPU/R600ClauseMergePass.cpp
index 3cb90218a7d5..3cb90218a7d5 100644
--- a/lib/Target/R600/R600ClauseMergePass.cpp
+++ b/lib/Target/AMDGPU/R600ClauseMergePass.cpp
diff --git a/lib/Target/R600/R600ControlFlowFinalizer.cpp b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
index c8f37f61fc16..c8f37f61fc16 100644
--- a/lib/Target/R600/R600ControlFlowFinalizer.cpp
+++ b/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/AMDGPU/R600Defines.h
index 51d87eda31d1..6ff0a2204cfa 100644
--- a/lib/Target/R600/R600Defines.h
+++ b/lib/Target/AMDGPU/R600Defines.h
@@ -48,7 +48,7 @@ namespace R600_InstFlag {
IS_EXPORT = (1 << 17),
LDS_1A2D = (1 << 18)
};
-}
+} // namespace R600_InstFlag
#define HAS_NATIVE_OPERANDS(Flags) ((Flags) & R600_InstFlag::NATIVE_OPERANDS)
@@ -138,7 +138,7 @@ namespace OpName {
VEC_COUNT
};
-}
+} // namespace OpName
//===----------------------------------------------------------------------===//
// Config register definitions
diff --git a/lib/Target/R600/R600EmitClauseMarkers.cpp b/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
index fdc20302f4a3..fdc20302f4a3 100644
--- a/lib/Target/R600/R600EmitClauseMarkers.cpp
+++ b/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
diff --git a/lib/Target/R600/R600ExpandSpecialInstrs.cpp b/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
index 211d392e8fcc..211d392e8fcc 100644
--- a/lib/Target/R600/R600ExpandSpecialInstrs.cpp
+++ b/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/AMDGPU/R600ISelLowering.cpp
index 8357b6d9d0ed..8357b6d9d0ed 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/AMDGPU/R600ISelLowering.cpp
diff --git a/lib/Target/R600/R600ISelLowering.h b/lib/Target/AMDGPU/R600ISelLowering.h
index c06d3c4fd309..c25287806988 100644
--- a/lib/Target/R600/R600ISelLowering.h
+++ b/lib/Target/AMDGPU/R600ISelLowering.h
@@ -75,6 +75,6 @@ private:
SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
};
-} // End namespace llvm;
+} // namespace llvm
#endif
diff --git a/lib/Target/R600/R600InstrFormats.td b/lib/Target/AMDGPU/R600InstrFormats.td
index 0ffd485476ec..0ffd485476ec 100644
--- a/lib/Target/R600/R600InstrFormats.td
+++ b/lib/Target/AMDGPU/R600InstrFormats.td
diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/AMDGPU/R600InstrInfo.cpp
index 5f0bdf348153..5ef883cbcadd 100644
--- a/lib/Target/R600/R600InstrInfo.cpp
+++ b/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -354,7 +354,7 @@ R600InstrInfo::ExtractSrcs(MachineInstr *MI,
const DenseMap<unsigned, unsigned> &PV,
unsigned &ConstCount) const {
ConstCount = 0;
- const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs = getSrcs(MI);
+ ArrayRef<std::pair<MachineOperand *, int64_t>> Srcs = getSrcs(MI);
const std::pair<int, unsigned> DummyPair(-1, 0);
std::vector<std::pair<int, unsigned> > Result;
unsigned i = 0;
@@ -628,8 +628,7 @@ R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
if (!isALUInstr(MI->getOpcode()))
continue;
- const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Srcs =
- getSrcs(MI);
+ ArrayRef<std::pair<MachineOperand *, int64_t>> Srcs = getSrcs(MI);
for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
std::pair<MachineOperand *, unsigned> Src = Srcs[j];
@@ -782,7 +781,7 @@ unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
@@ -1000,15 +999,15 @@ R600InstrInfo::DefinesPredicate(MachineInstr *MI,
bool
-R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const {
+R600InstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+ ArrayRef<MachineOperand> Pred2) const {
return false;
}
bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const {
+ ArrayRef<MachineOperand> Pred) const {
int PIdx = MI->findFirstPredOperandIdx();
if (MI->getOpcode() == AMDGPU::CF_ALU) {
diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/AMDGPU/R600InstrInfo.h
index d3dc0e58daa1..9c5f76c882f1 100644
--- a/lib/Target/R600/R600InstrInfo.h
+++ b/lib/Target/AMDGPU/R600InstrInfo.h
@@ -162,7 +162,9 @@ namespace llvm {
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond, bool AllowModify) const override;
- unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const override;
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+ DebugLoc DL) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
@@ -188,14 +190,14 @@ namespace llvm {
bool DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const override;
- bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const override;
+ bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+ ArrayRef<MachineOperand> Pred2) const override;
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
MachineBasicBlock &FMBB) const override;
bool PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const override;
+ ArrayRef<MachineOperand> Pred) const override;
unsigned int getPredicationCost(const MachineInstr *) const override;
@@ -296,6 +298,6 @@ int getLDSNoRetOp(uint16_t Opcode);
} //End namespace AMDGPU
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/AMDGPU/R600Instructions.td
index 7beed092b3f7..7beed092b3f7 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/AMDGPU/R600Instructions.td
diff --git a/lib/Target/R600/R600Intrinsics.td b/lib/Target/AMDGPU/R600Intrinsics.td
index 9681747006d9..9681747006d9 100644
--- a/lib/Target/R600/R600Intrinsics.td
+++ b/lib/Target/AMDGPU/R600Intrinsics.td
diff --git a/lib/Target/R600/R600MachineFunctionInfo.cpp b/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
index 01105c614c55..01105c614c55 100644
--- a/lib/Target/R600/R600MachineFunctionInfo.cpp
+++ b/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
diff --git a/lib/Target/R600/R600MachineFunctionInfo.h b/lib/Target/AMDGPU/R600MachineFunctionInfo.h
index 263561edd30d..f5556c1e81fc 100644
--- a/lib/Target/R600/R600MachineFunctionInfo.h
+++ b/lib/Target/AMDGPU/R600MachineFunctionInfo.h
@@ -29,6 +29,6 @@ public:
unsigned StackSize;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/R600/R600MachineScheduler.cpp b/lib/Target/AMDGPU/R600MachineScheduler.cpp
index bcde5fb50dac..bcde5fb50dac 100644
--- a/lib/Target/R600/R600MachineScheduler.cpp
+++ b/lib/Target/AMDGPU/R600MachineScheduler.cpp
diff --git a/lib/Target/R600/R600MachineScheduler.h b/lib/Target/AMDGPU/R600MachineScheduler.h
index fc5b95c28e71..fc5b95c28e71 100644
--- a/lib/Target/R600/R600MachineScheduler.h
+++ b/lib/Target/AMDGPU/R600MachineScheduler.h
diff --git a/lib/Target/R600/R600OptimizeVectorRegisters.cpp b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
index 0c06ccc736d0..a1a1b4043429 100644
--- a/lib/Target/R600/R600OptimizeVectorRegisters.cpp
+++ b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -375,7 +375,7 @@ bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
return false;
}
-}
+} // namespace
llvm::FunctionPass *llvm::createR600VectorRegMerger(TargetMachine &tm) {
return new R600VectorRegMerger(tm);
diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/AMDGPU/R600Packetizer.cpp
index deee5bc39974..deee5bc39974 100644
--- a/lib/Target/R600/R600Packetizer.cpp
+++ b/lib/Target/AMDGPU/R600Packetizer.cpp
diff --git a/lib/Target/R600/R600RegisterInfo.cpp b/lib/Target/AMDGPU/R600RegisterInfo.cpp
index fb0359cfc651..fb0359cfc651 100644
--- a/lib/Target/R600/R600RegisterInfo.cpp
+++ b/lib/Target/AMDGPU/R600RegisterInfo.cpp
diff --git a/lib/Target/R600/R600RegisterInfo.h b/lib/Target/AMDGPU/R600RegisterInfo.h
index 9713e600a721..9713e600a721 100644
--- a/lib/Target/R600/R600RegisterInfo.h
+++ b/lib/Target/AMDGPU/R600RegisterInfo.h
diff --git a/lib/Target/R600/R600RegisterInfo.td b/lib/Target/AMDGPU/R600RegisterInfo.td
index cc667d985a82..cc667d985a82 100644
--- a/lib/Target/R600/R600RegisterInfo.td
+++ b/lib/Target/AMDGPU/R600RegisterInfo.td
diff --git a/lib/Target/R600/R600Schedule.td b/lib/Target/AMDGPU/R600Schedule.td
index df62bf85c0ad..df62bf85c0ad 100644
--- a/lib/Target/R600/R600Schedule.td
+++ b/lib/Target/AMDGPU/R600Schedule.td
diff --git a/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp b/lib/Target/AMDGPU/R600TextureIntrinsicsReplacer.cpp
index 2fc7b02f673f..93bcf680a022 100644
--- a/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
+++ b/lib/Target/AMDGPU/R600TextureIntrinsicsReplacer.cpp
@@ -296,7 +296,7 @@ public:
char R600TextureIntrinsicsReplacer::ID = 0;
-}
+} // namespace
FunctionPass *llvm::createR600TextureIntrinsicsReplacer() {
return new R600TextureIntrinsicsReplacer();
diff --git a/lib/Target/R600/R700Instructions.td b/lib/Target/AMDGPU/R700Instructions.td
index 613a0d729bb3..613a0d729bb3 100644
--- a/lib/Target/R600/R700Instructions.td
+++ b/lib/Target/AMDGPU/R700Instructions.td
diff --git a/lib/Target/R600/SIAnnotateControlFlow.cpp b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index ccfbf1bf19ed..ccfbf1bf19ed 100644
--- a/lib/Target/R600/SIAnnotateControlFlow.cpp
+++ b/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
diff --git a/lib/Target/R600/SIDefines.h b/lib/Target/AMDGPU/SIDefines.h
index 4727d971ab7a..f1b4ba1ac07d 100644
--- a/lib/Target/R600/SIDefines.h
+++ b/lib/Target/AMDGPU/SIDefines.h
@@ -39,7 +39,7 @@ enum {
WQM = 1 << 20,
VGPRSpill = 1 << 21
};
-}
+} // namespace SIInstrFlags
namespace llvm {
namespace AMDGPU {
@@ -74,7 +74,7 @@ namespace SIInstrFlags {
P_NORMAL = 1 << 8, // Positive normal
P_INFINITY = 1 << 9 // Positive infinity
};
-}
+} // namespace SIInstrFlags
namespace SISrcMods {
enum {
diff --git a/lib/Target/R600/SIFixControlFlowLiveIntervals.cpp b/lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp
index 5fe8d19426dd..5fe8d19426dd 100644
--- a/lib/Target/R600/SIFixControlFlowLiveIntervals.cpp
+++ b/lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp
diff --git a/lib/Target/R600/SIFixSGPRCopies.cpp b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 23502b45905c..23502b45905c 100644
--- a/lib/Target/R600/SIFixSGPRCopies.cpp
+++ b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
diff --git a/lib/Target/R600/SIFixSGPRLiveRanges.cpp b/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp
index 0c54446b0fb1..0c54446b0fb1 100644
--- a/lib/Target/R600/SIFixSGPRLiveRanges.cpp
+++ b/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp
diff --git a/lib/Target/R600/SIFoldOperands.cpp b/lib/Target/AMDGPU/SIFoldOperands.cpp
index d14e37a64612..d14e37a64612 100644
--- a/lib/Target/R600/SIFoldOperands.cpp
+++ b/lib/Target/AMDGPU/SIFoldOperands.cpp
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index 12d08cf4c7f5..12d08cf4c7f5 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
diff --git a/lib/Target/R600/SIISelLowering.h b/lib/Target/AMDGPU/SIISelLowering.h
index a956b013bdb1..a956b013bdb1 100644
--- a/lib/Target/R600/SIISelLowering.h
+++ b/lib/Target/AMDGPU/SIISelLowering.h
diff --git a/lib/Target/R600/SIInsertWaits.cpp b/lib/Target/AMDGPU/SIInsertWaits.cpp
index 90a37f174682..90a37f174682 100644
--- a/lib/Target/R600/SIInsertWaits.cpp
+++ b/lib/Target/AMDGPU/SIInsertWaits.cpp
diff --git a/lib/Target/R600/SIInstrFormats.td b/lib/Target/AMDGPU/SIInstrFormats.td
index 3dddd246cec0..211666a9bdbc 100644
--- a/lib/Target/R600/SIInstrFormats.td
+++ b/lib/Target/AMDGPU/SIInstrFormats.td
@@ -655,6 +655,8 @@ class FLAT <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
let UseNamedOperandTable = 1;
let hasSideEffects = 0;
+ let AsmMatchConverter = "cvtFlat";
+ let SchedRW = [WriteVMEM];
}
class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index d647c25286fb..47bc17823b3f 100644
--- a/lib/Target/R600/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -200,9 +200,9 @@ static bool isStride64(unsigned Opc) {
}
}
-bool SIInstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt,
- unsigned &BaseReg, unsigned &Offset,
- const TargetRegisterInfo *TRI) const {
+bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
unsigned Opc = LdSt->getOpcode();
if (isDS(Opc)) {
const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
@@ -1053,8 +1053,8 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
unsigned BaseReg0, Offset0;
unsigned BaseReg1, Offset1;
- if (getLdStBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
- getLdStBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
+ if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
+ getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
assert(MIa->hasOneMemOperand() && MIb->hasOneMemOperand() &&
"read2 / write2 not expected here yet");
unsigned Width0 = (*MIa->memoperands_begin())->getSize();
@@ -1806,7 +1806,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
}
MachineBasicBlock &MBB = *MI->getParent();
- // Extract the the ptr from the resource descriptor.
+ // Extract the ptr from the resource descriptor.
// SRsrcPtrLo = srsrc:sub0
unsigned SRsrcPtrLo = buildExtractSubReg(MI, MRI, *SRsrc,
diff --git a/lib/Target/R600/SIInstrInfo.h b/lib/Target/AMDGPU/SIInstrInfo.h
index 64b5120841c4..6fafb945c993 100644
--- a/lib/Target/R600/SIInstrInfo.h
+++ b/lib/Target/AMDGPU/SIInstrInfo.h
@@ -79,9 +79,9 @@ public:
int64_t &Offset1,
int64_t &Offset2) const override;
- bool getLdStBaseRegImmOfs(MachineInstr *LdSt,
- unsigned &BaseReg, unsigned &Offset,
- const TargetRegisterInfo *TRI) const final;
+ bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const final;
bool shouldClusterLoads(MachineInstr *FirstLdSt,
MachineInstr *SecondLdSt,
diff --git a/lib/Target/R600/SIInstrInfo.td b/lib/Target/AMDGPU/SIInstrInfo.td
index 4fc24989b3b8..93e4ca74ec38 100644
--- a/lib/Target/R600/SIInstrInfo.td
+++ b/lib/Target/AMDGPU/SIInstrInfo.td
@@ -390,27 +390,38 @@ class GDSBaseMatchClass <string parser> : AsmOperandClass {
def GDSMatchClass : GDSBaseMatchClass <"parseDSOptionalOps">;
def GDS01MatchClass : GDSBaseMatchClass <"parseDSOff01OptionalOps">;
-def GLCMatchClass : AsmOperandClass {
- let Name = "GLC";
+class GLCBaseMatchClass <string parser> : AsmOperandClass {
+ let Name = "GLC"#parser;
let PredicateMethod = "isImm";
- let ParserMethod = "parseMubufOptionalOps";
+ let ParserMethod = parser;
let RenderMethod = "addImmOperands";
}
-def SLCMatchClass : AsmOperandClass {
- let Name = "SLC";
+def GLCMubufMatchClass : GLCBaseMatchClass <"parseMubufOptionalOps">;
+def GLCFlatMatchClass : GLCBaseMatchClass <"parseFlatOptionalOps">;
+
+class SLCBaseMatchClass <string parser> : AsmOperandClass {
+ let Name = "SLC"#parser;
let PredicateMethod = "isImm";
- let ParserMethod = "parseMubufOptionalOps";
+ let ParserMethod = parser;
let RenderMethod = "addImmOperands";
}
-def TFEMatchClass : AsmOperandClass {
- let Name = "TFE";
+def SLCMubufMatchClass : SLCBaseMatchClass <"parseMubufOptionalOps">;
+def SLCFlatMatchClass : SLCBaseMatchClass <"parseFlatOptionalOps">;
+def SLCFlatAtomicMatchClass : SLCBaseMatchClass <"parseFlatAtomicOptionalOps">;
+
+class TFEBaseMatchClass <string parser> : AsmOperandClass {
+ let Name = "TFE"#parser;
let PredicateMethod = "isImm";
- let ParserMethod = "parseMubufOptionalOps";
+ let ParserMethod = parser;
let RenderMethod = "addImmOperands";
}
+def TFEMubufMatchClass : TFEBaseMatchClass <"parseMubufOptionalOps">;
+def TFEFlatMatchClass : TFEBaseMatchClass <"parseFlatOptionalOps">;
+def TFEFlatAtomicMatchClass : TFEBaseMatchClass <"parseFlatAtomicOptionalOps">;
+
def OModMatchClass : AsmOperandClass {
let Name = "OMod";
let PredicateMethod = "isImm";
@@ -463,19 +474,32 @@ def gds : gds_base <GDSMatchClass>;
def gds01 : gds_base <GDS01MatchClass>;
-def glc : Operand <i1> {
+class glc_base <AsmOperandClass mc> : Operand <i1> {
let PrintMethod = "printGLC";
- let ParserMatchClass = GLCMatchClass;
+ let ParserMatchClass = mc;
}
-def slc : Operand <i1> {
+
+def glc : glc_base <GLCMubufMatchClass>;
+def glc_flat : glc_base <GLCFlatMatchClass>;
+
+class slc_base <AsmOperandClass mc> : Operand <i1> {
let PrintMethod = "printSLC";
- let ParserMatchClass = SLCMatchClass;
+ let ParserMatchClass = mc;
}
-def tfe : Operand <i1> {
+
+def slc : slc_base <SLCMubufMatchClass>;
+def slc_flat : slc_base <SLCFlatMatchClass>;
+def slc_flat_atomic : slc_base <SLCFlatAtomicMatchClass>;
+
+class tfe_base <AsmOperandClass mc> : Operand <i1> {
let PrintMethod = "printTFE";
- let ParserMatchClass = TFEMatchClass;
+ let ParserMatchClass = mc;
}
+def tfe : tfe_base <TFEMubufMatchClass>;
+def tfe_flat : tfe_base <TFEFlatMatchClass>;
+def tfe_flat_atomic : tfe_base <TFEFlatAtomicMatchClass>;
+
def omod : Operand <i32> {
let PrintMethod = "printOModSI";
let ParserMatchClass = OModMatchClass;
@@ -2335,30 +2359,48 @@ multiclass MUBUF_Store_Helper <mubuf op, string name, RegisterClass vdataClass,
class FLAT_Load_Helper <bits<7> op, string asm, RegisterClass regClass> :
FLAT <op, (outs regClass:$vdst),
- (ins VReg_64:$addr),
- asm#" $vdst, $addr, [M0, FLAT_SCRATCH]", []> {
- let glc = 0;
- let slc = 0;
- let tfe = 0;
+ (ins VReg_64:$addr, glc_flat:$glc, slc_flat:$slc, tfe_flat:$tfe),
+ asm#" $vdst, $addr"#"$glc"#"$slc"#"$tfe", []> {
let data = 0;
let mayLoad = 1;
}
class FLAT_Store_Helper <bits<7> op, string name, RegisterClass vdataClass> :
- FLAT <op, (outs), (ins vdataClass:$data, VReg_64:$addr),
- name#" $data, $addr, [M0, FLAT_SCRATCH]",
+ FLAT <op, (outs), (ins vdataClass:$data, VReg_64:$addr,
+ glc_flat:$glc, slc_flat:$slc, tfe_flat:$tfe),
+ name#" $data, $addr"#"$glc"#"$slc"#"$tfe",
[]> {
let mayLoad = 0;
let mayStore = 1;
// Encoding
- let glc = 0;
- let slc = 0;
- let tfe = 0;
let vdst = 0;
}
+multiclass FLAT_ATOMIC <bits<7> op, string name, RegisterClass vdst_rc,
+ RegisterClass data_rc = vdst_rc> {
+
+ let mayLoad = 1, mayStore = 1 in {
+ def "" : FLAT <op, (outs),
+ (ins VReg_64:$addr, data_rc:$data, slc_flat_atomic:$slc,
+ tfe_flat_atomic:$tfe),
+ name#" $addr, $data"#"$slc"#"$tfe", []>,
+ AtomicNoRet <NAME, 0> {
+ let glc = 0;
+ let vdst = 0;
+ }
+
+ def _RTN : FLAT <op, (outs vdst_rc:$vdst),
+ (ins VReg_64:$addr, data_rc:$data, slc_flat_atomic:$slc,
+ tfe_flat_atomic:$tfe),
+ name#" $vdst, $addr, $data glc"#"$slc"#"$tfe", []>,
+ AtomicNoRet <NAME, 1> {
+ let glc = 1;
+ }
+ }
+}
+
class MIMG_Mask <string op, int channels> {
string Op = op;
int Channels = channels;
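
The SIInstrInfo.td hunks above replace three hand-copied AsmOperandClass defs with parameterized bases (GLCBaseMatchClass and friends), so each parser entry point (mubuf, flat, flat-atomic) stamps out its own variant. The same factoring in C++ terms, offered as a rough analogy rather than how TableGen actually expands classes:

#include <iostream>
#include <string>

// One parameterized "base class" replaces three near-identical definitions;
// the parser hook becomes a constructor argument, like the `parser` template
// argument of GLCBaseMatchClass above.
struct MatchClass {
  std::string Name;
  std::string ParserMethod;
  MatchClass(const std::string &Base, const std::string &Parser)
      : Name(Base + Parser), ParserMethod(Parser) {}
};

int main() {
  MatchClass GLCMubuf("GLC", "parseMubufOptionalOps");
  MatchClass GLCFlat("GLC", "parseFlatOptionalOps");
  std::cout << GLCMubuf.Name << " -> " << GLCMubuf.ParserMethod << "\n";
  std::cout << GLCFlat.Name << " -> " << GLCFlat.ParserMethod << "\n";
  return 0;
}
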
diff --git a/lib/Target/R600/SIInstructions.td b/lib/Target/AMDGPU/SIInstructions.td
index 2f39074802b7..8c8d836776db 100644
--- a/lib/Target/R600/SIInstructions.td
+++ b/lib/Target/AMDGPU/SIInstructions.td
@@ -32,8 +32,6 @@ def isGCN : Predicate<"Subtarget->getGeneration() "
def isSI : Predicate<"Subtarget->getGeneration() "
"== AMDGPUSubtarget::SOUTHERN_ISLANDS">;
-def HasFlatAddressSpace : Predicate<"Subtarget.hasFlatAddressSpace()">;
-
def has16BankLDS : Predicate<"Subtarget->getLDSBankCount() == 16">;
def has32BankLDS : Predicate<"Subtarget->getLDSBankCount() == 32">;
@@ -1154,80 +1152,6 @@ defm IMAGE_SAMPLE_C_CD_CL_O : MIMG_Sampler <0x0000006f, "image_sample_c_cd_cl_o"
//def IMAGE_SAMPLER : MIMG_NoPattern_ <"image_sampler", 0x0000007f>;
//===----------------------------------------------------------------------===//
-// Flat Instructions
-//===----------------------------------------------------------------------===//
-
-let Predicates = [HasFlatAddressSpace] in {
-def FLAT_LOAD_UBYTE : FLAT_Load_Helper <0x00000008, "flat_load_ubyte", VGPR_32>;
-def FLAT_LOAD_SBYTE : FLAT_Load_Helper <0x00000009, "flat_load_sbyte", VGPR_32>;
-def FLAT_LOAD_USHORT : FLAT_Load_Helper <0x0000000a, "flat_load_ushort", VGPR_32>;
-def FLAT_LOAD_SSHORT : FLAT_Load_Helper <0x0000000b, "flat_load_sshort", VGPR_32>;
-def FLAT_LOAD_DWORD : FLAT_Load_Helper <0x0000000c, "flat_load_dword", VGPR_32>;
-def FLAT_LOAD_DWORDX2 : FLAT_Load_Helper <0x0000000d, "flat_load_dwordx2", VReg_64>;
-def FLAT_LOAD_DWORDX4 : FLAT_Load_Helper <0x0000000e, "flat_load_dwordx4", VReg_128>;
-def FLAT_LOAD_DWORDX3 : FLAT_Load_Helper <0x00000010, "flat_load_dwordx3", VReg_96>;
-
-def FLAT_STORE_BYTE : FLAT_Store_Helper <
- 0x00000018, "flat_store_byte", VGPR_32
->;
-
-def FLAT_STORE_SHORT : FLAT_Store_Helper <
- 0x0000001a, "flat_store_short", VGPR_32
->;
-
-def FLAT_STORE_DWORD : FLAT_Store_Helper <
- 0x0000001c, "flat_store_dword", VGPR_32
->;
-
-def FLAT_STORE_DWORDX2 : FLAT_Store_Helper <
- 0x0000001d, "flat_store_dwordx2", VReg_64
->;
-
-def FLAT_STORE_DWORDX4 : FLAT_Store_Helper <
- 0x0000001e, "flat_store_dwordx4", VReg_128
->;
-
-def FLAT_STORE_DWORDX3 : FLAT_Store_Helper <
- 0x0000001e, "flat_store_dwordx3", VReg_96
->;
-
-//def FLAT_ATOMIC_SWAP : FLAT_ <0x00000030, "flat_atomic_swap", []>;
-//def FLAT_ATOMIC_CMPSWAP : FLAT_ <0x00000031, "flat_atomic_cmpswap", []>;
-//def FLAT_ATOMIC_ADD : FLAT_ <0x00000032, "flat_atomic_add", []>;
-//def FLAT_ATOMIC_SUB : FLAT_ <0x00000033, "flat_atomic_sub", []>;
-//def FLAT_ATOMIC_RSUB : FLAT_ <0x00000034, "flat_atomic_rsub", []>;
-//def FLAT_ATOMIC_SMIN : FLAT_ <0x00000035, "flat_atomic_smin", []>;
-//def FLAT_ATOMIC_UMIN : FLAT_ <0x00000036, "flat_atomic_umin", []>;
-//def FLAT_ATOMIC_SMAX : FLAT_ <0x00000037, "flat_atomic_smax", []>;
-//def FLAT_ATOMIC_UMAX : FLAT_ <0x00000038, "flat_atomic_umax", []>;
-//def FLAT_ATOMIC_AND : FLAT_ <0x00000039, "flat_atomic_and", []>;
-//def FLAT_ATOMIC_OR : FLAT_ <0x0000003a, "flat_atomic_or", []>;
-//def FLAT_ATOMIC_XOR : FLAT_ <0x0000003b, "flat_atomic_xor", []>;
-//def FLAT_ATOMIC_INC : FLAT_ <0x0000003c, "flat_atomic_inc", []>;
-//def FLAT_ATOMIC_DEC : FLAT_ <0x0000003d, "flat_atomic_dec", []>;
-//def FLAT_ATOMIC_FCMPSWAP : FLAT_ <0x0000003e, "flat_atomic_fcmpswap", []>;
-//def FLAT_ATOMIC_FMIN : FLAT_ <0x0000003f, "flat_atomic_fmin", []>;
-//def FLAT_ATOMIC_FMAX : FLAT_ <0x00000040, "flat_atomic_fmax", []>;
-//def FLAT_ATOMIC_SWAP_X2 : FLAT_X2 <0x00000050, "flat_atomic_swap_x2", []>;
-//def FLAT_ATOMIC_CMPSWAP_X2 : FLAT_X2 <0x00000051, "flat_atomic_cmpswap_x2", []>;
-//def FLAT_ATOMIC_ADD_X2 : FLAT_X2 <0x00000052, "flat_atomic_add_x2", []>;
-//def FLAT_ATOMIC_SUB_X2 : FLAT_X2 <0x00000053, "flat_atomic_sub_x2", []>;
-//def FLAT_ATOMIC_RSUB_X2 : FLAT_X2 <0x00000054, "flat_atomic_rsub_x2", []>;
-//def FLAT_ATOMIC_SMIN_X2 : FLAT_X2 <0x00000055, "flat_atomic_smin_x2", []>;
-//def FLAT_ATOMIC_UMIN_X2 : FLAT_X2 <0x00000056, "flat_atomic_umin_x2", []>;
-//def FLAT_ATOMIC_SMAX_X2 : FLAT_X2 <0x00000057, "flat_atomic_smax_x2", []>;
-//def FLAT_ATOMIC_UMAX_X2 : FLAT_X2 <0x00000058, "flat_atomic_umax_x2", []>;
-//def FLAT_ATOMIC_AND_X2 : FLAT_X2 <0x00000059, "flat_atomic_and_x2", []>;
-//def FLAT_ATOMIC_OR_X2 : FLAT_X2 <0x0000005a, "flat_atomic_or_x2", []>;
-//def FLAT_ATOMIC_XOR_X2 : FLAT_X2 <0x0000005b, "flat_atomic_xor_x2", []>;
-//def FLAT_ATOMIC_INC_X2 : FLAT_X2 <0x0000005c, "flat_atomic_inc_x2", []>;
-//def FLAT_ATOMIC_DEC_X2 : FLAT_X2 <0x0000005d, "flat_atomic_dec_x2", []>;
-//def FLAT_ATOMIC_FCMPSWAP_X2 : FLAT_X2 <0x0000005e, "flat_atomic_fcmpswap_x2", []>;
-//def FLAT_ATOMIC_FMIN_X2 : FLAT_X2 <0x0000005f, "flat_atomic_fmin_x2", []>;
-//def FLAT_ATOMIC_FMAX_X2 : FLAT_X2 <0x00000060, "flat_atomic_fmax_x2", []>;
-
-} // End HasFlatAddressSpace predicate
-//===----------------------------------------------------------------------===//
// VOP1 Instructions
//===----------------------------------------------------------------------===//
@@ -3130,38 +3054,6 @@ defm V_MAD_I64_I32 : VOP3Inst <vop3<0x177>, "v_mad_i64_i32",
} // End isCI
-//===----------------------------------------------------------------------===//
-// Flat Patterns
-//===----------------------------------------------------------------------===//
-
-class FLATLoad_Pattern <FLAT Instr_ADDR64, ValueType vt,
- PatFrag flat_ld> :
- Pat <(vt (flat_ld i64:$ptr)),
- (Instr_ADDR64 $ptr)
->;
-
-def : FLATLoad_Pattern <FLAT_LOAD_SBYTE, i32, sextloadi8_flat>;
-def : FLATLoad_Pattern <FLAT_LOAD_UBYTE, i32, az_extloadi8_flat>;
-def : FLATLoad_Pattern <FLAT_LOAD_SSHORT, i32, sextloadi16_flat>;
-def : FLATLoad_Pattern <FLAT_LOAD_USHORT, i32, az_extloadi16_flat>;
-def : FLATLoad_Pattern <FLAT_LOAD_DWORD, i32, flat_load>;
-def : FLATLoad_Pattern <FLAT_LOAD_DWORDX2, i64, flat_load>;
-def : FLATLoad_Pattern <FLAT_LOAD_DWORDX2, i64, az_extloadi32_flat>;
-def : FLATLoad_Pattern <FLAT_LOAD_DWORDX2, v2i32, flat_load>;
-def : FLATLoad_Pattern <FLAT_LOAD_DWORDX4, v4i32, flat_load>;
-
-class FLATStore_Pattern <FLAT Instr, ValueType vt, PatFrag st> :
- Pat <(st vt:$value, i64:$ptr),
- (Instr $value, $ptr)
- >;
-
-def : FLATStore_Pattern <FLAT_STORE_BYTE, i32, truncstorei8_flat>;
-def : FLATStore_Pattern <FLAT_STORE_SHORT, i32, truncstorei16_flat>;
-def : FLATStore_Pattern <FLAT_STORE_DWORD, i32, flat_store>;
-def : FLATStore_Pattern <FLAT_STORE_DWORDX2, i64, flat_store>;
-def : FLATStore_Pattern <FLAT_STORE_DWORDX2, v2i32, flat_store>;
-def : FLATStore_Pattern <FLAT_STORE_DWORDX4, v4i32, flat_store>;
-
/********** ====================== **********/
/**********    Indirect addressing    **********/
/********** ====================== **********/
diff --git a/lib/Target/R600/SIIntrinsics.td b/lib/Target/AMDGPU/SIIntrinsics.td
index 027a0a2f5167..027a0a2f5167 100644
--- a/lib/Target/R600/SIIntrinsics.td
+++ b/lib/Target/AMDGPU/SIIntrinsics.td
diff --git a/lib/Target/R600/SILoadStoreOptimizer.cpp b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 9b1d256dc5a8..9b1d256dc5a8 100644
--- a/lib/Target/R600/SILoadStoreOptimizer.cpp
+++ b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
diff --git a/lib/Target/R600/SILowerControlFlow.cpp b/lib/Target/AMDGPU/SILowerControlFlow.cpp
index c319b32111fe..c319b32111fe 100644
--- a/lib/Target/R600/SILowerControlFlow.cpp
+++ b/lib/Target/AMDGPU/SILowerControlFlow.cpp
diff --git a/lib/Target/R600/SILowerI1Copies.cpp b/lib/Target/AMDGPU/SILowerI1Copies.cpp
index 67421e231d8d..67421e231d8d 100644
--- a/lib/Target/R600/SILowerI1Copies.cpp
+++ b/lib/Target/AMDGPU/SILowerI1Copies.cpp
diff --git a/lib/Target/R600/SIMachineFunctionInfo.cpp b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 587ea63d6796..587ea63d6796 100644
--- a/lib/Target/R600/SIMachineFunctionInfo.cpp
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
diff --git a/lib/Target/R600/SIMachineFunctionInfo.h b/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index 667da4c8af61..667da4c8af61 100644
--- a/lib/Target/R600/SIMachineFunctionInfo.h
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.h
diff --git a/lib/Target/R600/SIPrepareScratchRegs.cpp b/lib/Target/AMDGPU/SIPrepareScratchRegs.cpp
index 0a7f684552f0..0a7f684552f0 100644
--- a/lib/Target/R600/SIPrepareScratchRegs.cpp
+++ b/lib/Target/AMDGPU/SIPrepareScratchRegs.cpp
diff --git a/lib/Target/R600/SIRegisterInfo.cpp b/lib/Target/AMDGPU/SIRegisterInfo.cpp
index db2ff0b1f952..db2ff0b1f952 100644
--- a/lib/Target/R600/SIRegisterInfo.cpp
+++ b/lib/Target/AMDGPU/SIRegisterInfo.cpp
diff --git a/lib/Target/R600/SIRegisterInfo.h b/lib/Target/AMDGPU/SIRegisterInfo.h
index bfdb67c5e12b..bfdb67c5e12b 100644
--- a/lib/Target/R600/SIRegisterInfo.h
+++ b/lib/Target/AMDGPU/SIRegisterInfo.h
diff --git a/lib/Target/R600/SIRegisterInfo.td b/lib/Target/AMDGPU/SIRegisterInfo.td
index 2a9017fa2a98..2a9017fa2a98 100644
--- a/lib/Target/R600/SIRegisterInfo.td
+++ b/lib/Target/AMDGPU/SIRegisterInfo.td
diff --git a/lib/Target/R600/SISchedule.td b/lib/Target/AMDGPU/SISchedule.td
index 9b1f676020bf..9b1f676020bf 100644
--- a/lib/Target/R600/SISchedule.td
+++ b/lib/Target/AMDGPU/SISchedule.td
diff --git a/lib/Target/R600/SIShrinkInstructions.cpp b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 51e72cdb5f9e..51e72cdb5f9e 100644
--- a/lib/Target/R600/SIShrinkInstructions.cpp
+++ b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
diff --git a/lib/Target/R600/SITypeRewriter.cpp b/lib/Target/AMDGPU/SITypeRewriter.cpp
index 591ce857cc7d..591ce857cc7d 100644
--- a/lib/Target/R600/SITypeRewriter.cpp
+++ b/lib/Target/AMDGPU/SITypeRewriter.cpp
diff --git a/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp b/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp
index d723d6e3e8b7..2112135aa5d4 100644
--- a/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
+++ b/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp
@@ -23,7 +23,7 @@ Target llvm::TheAMDGPUTarget;
Target llvm::TheGCNTarget;
/// \brief Extern function to initialize the targets for the AMDGPU backend
-extern "C" void LLVMInitializeR600TargetInfo() {
+extern "C" void LLVMInitializeAMDGPUTargetInfo() {
RegisterTarget<Triple::r600, false>
R600(TheAMDGPUTarget, "r600", "AMD GPUs HD2XXX-HD6XXX");
RegisterTarget<Triple::amdgcn, false> GCN(TheGCNTarget, "amdgcn", "AMD GCN GPUs");
diff --git a/lib/Target/AMDGPU/TargetInfo/CMakeLists.txt b/lib/Target/AMDGPU/TargetInfo/CMakeLists.txt
new file mode 100644
index 000000000000..961dc5509000
--- /dev/null
+++ b/lib/Target/AMDGPU/TargetInfo/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMAMDGPUInfo
+ AMDGPUTargetInfo.cpp
+ )
diff --git a/lib/Target/R600/TargetInfo/LLVMBuild.txt b/lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt
index c3d3cf51cc8e..291317fa072f 100644
--- a/lib/Target/R600/TargetInfo/LLVMBuild.txt
+++ b/lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt
@@ -1,4 +1,4 @@
-;===- ./lib/Target/R600/TargetInfo/LLVMBuild.txt --------------*- Conf -*--===;
+;===- ./lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt --------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
@@ -17,7 +17,7 @@
[component_0]
type = Library
-name = R600Info
-parent = R600
+name = AMDGPUInfo
+parent = AMDGPU
required_libraries = Support
-add_to_library_groups = R600
+add_to_library_groups = AMDGPU
diff --git a/lib/Target/R600/TargetInfo/Makefile b/lib/Target/AMDGPU/TargetInfo/Makefile
index b8ac4e782302..1b232871bd62 100644
--- a/lib/Target/R600/TargetInfo/Makefile
+++ b/lib/Target/AMDGPU/TargetInfo/Makefile
@@ -7,7 +7,7 @@
#
##===----------------------------------------------------------------------===##
LEVEL = ../../../..
-LIBRARYNAME = LLVMR600Info
+LIBRARYNAME = LLVMAMDGPUInfo
# Hack: we need to include 'main' target directory to grab private headers
CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
diff --git a/lib/Target/R600/VIInstrFormats.td b/lib/Target/AMDGPU/VIInstrFormats.td
index d8738f992630..d8738f992630 100644
--- a/lib/Target/R600/VIInstrFormats.td
+++ b/lib/Target/AMDGPU/VIInstrFormats.td
diff --git a/lib/Target/R600/VIInstructions.td b/lib/Target/AMDGPU/VIInstructions.td
index 5bf86e649ce0..5bf86e649ce0 100644
--- a/lib/Target/R600/VIInstructions.td
+++ b/lib/Target/AMDGPU/VIInstructions.td
diff --git a/lib/Target/ARM/ARM.h b/lib/Target/ARM/ARM.h
index 9550a3a3cad1..d554fe5d4465 100644
--- a/lib/Target/ARM/ARM.h
+++ b/lib/Target/ARM/ARM.h
@@ -46,6 +46,6 @@ FunctionPass *createThumb2SizeReductionPass(
void LowerARMMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
ARMAsmPrinter &AP);
-} // end namespace llvm;
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp
index d84f2961d810..4530e4155ae2 100644
--- a/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -429,7 +429,7 @@ void ARMAsmPrinter::emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
}
void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) {
- Triple TT(TM.getTargetTriple());
+ const Triple &TT = TM.getTargetTriple();
// Use unified assembler syntax.
OutStreamer->EmitAssemblerFlag(MCAF_SyntaxUnified);
@@ -473,7 +473,7 @@ emitNonLazySymbolPointer(MCStreamer &OutStreamer, MCSymbol *StubLabel,
void ARMAsmPrinter::EmitEndOfAsmFile(Module &M) {
- Triple TT(TM.getTargetTriple());
+ const Triple &TT = TM.getTargetTriple();
if (TT.isOSBinFormatMachO()) {
// All darwin targets use mach-o.
const TargetLoweringObjectFileMachO &TLOFMacho =
@@ -564,7 +564,7 @@ void ARMAsmPrinter::emitAttributes() {
// anyhow.
// FIXME: For ifunc related functions we could iterate over and look
// for a feature string that doesn't match the default one.
- StringRef TT = TM.getTargetTriple();
+ const Triple &TT = TM.getTargetTriple();
StringRef CPU = TM.getTargetCPU();
StringRef FS = TM.getTargetFeatureString();
std::string ArchFS = ARM_MC::ParseARMTriple(TT, CPU);
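Note: the recurring change in this commit replaces string-typed triples with a const reference to the TargetMachine's already-parsed llvm::Triple. A minimal sketch of why, not from this patch (hypothetical free functions; only llvm::Triple and llvm::StringRef are assumed):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"

// Before: every query re-parses the triple string into a Triple.
bool isMachOBefore(llvm::StringRef TT) {
  llvm::Triple T(TT);             // string parse on every call
  return T.isOSBinFormatMachO();
}

// After: callers borrow the cached, already-parsed Triple.
bool isMachOAfter(const llvm::Triple &TT) {
  return TT.isOSBinFormatMachO(); // no copy, no re-parse
}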
diff --git a/lib/Target/ARM/ARMAsmPrinter.h b/lib/Target/ARM/ARMAsmPrinter.h
index a6bc3683c8b9..3d251213f5bf 100644
--- a/lib/Target/ARM/ARMAsmPrinter.h
+++ b/lib/Target/ARM/ARMAsmPrinter.h
@@ -105,7 +105,7 @@ private:
public:
unsigned getISAEncoding() override {
// ARM/Darwin adds ISA to the DWARF info for each function.
- Triple TT(TM.getTargetTriple());
+ const Triple &TT = TM.getTargetTriple();
if (!TT.isOSBinFormatMachO())
return 0;
bool isThumb = TT.getArch() == Triple::thumb ||
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 9c4b4961fe8c..f2b7a6419be3 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -28,6 +28,7 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
@@ -396,7 +397,7 @@ unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
int BOpc = !AFI->isThumbFunction()
@@ -458,8 +459,7 @@ bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
}
bool ARMBaseInstrInfo::
-PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const {
+PredicateInstruction(MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
unsigned Opc = MI->getOpcode();
if (isUncondBranchOpcode(Opc)) {
MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
@@ -479,9 +479,8 @@ PredicateInstruction(MachineInstr *MI,
return false;
}
-bool ARMBaseInstrInfo::
-SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const {
+bool ARMBaseInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+ ArrayRef<MachineOperand> Pred2) const {
if (Pred1.size() > 2 || Pred2.size() > 2)
return false;
@@ -595,7 +594,7 @@ template <> bool IsCPSRDead<MachineInstr>(MachineInstr *MI) {
// all definitions of CPSR are dead
return true;
}
-}
+} // namespace llvm
/// GetInstSize - Return the size of the specified MachineInstr.
///
@@ -3995,7 +3994,7 @@ int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
}
bool ARMBaseInstrInfo::
-hasHighOperandLatency(const InstrItineraryData *ItinData,
+hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx) const {
@@ -4007,9 +4006,8 @@ hasHighOperandLatency(const InstrItineraryData *ItinData,
return true;
// Hoist VFP / NEON instructions with 4 or higher latency.
- int Latency = computeOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
- if (Latency < 0)
- Latency = getInstrLatency(ItinData, DefMI);
+ unsigned Latency
+ = SchedModel.computeOperandLatency(DefMI, DefIdx, UseMI, UseIdx);
if (Latency <= 3)
return false;
return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
@@ -4017,8 +4015,9 @@ hasHighOperandLatency(const InstrItineraryData *ItinData,
}
bool ARMBaseInstrInfo::
-hasLowDefLatency(const InstrItineraryData *ItinData,
+hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr *DefMI, unsigned DefIdx) const {
+ const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
if (!ItinData || ItinData->isEmpty())
return false;
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h
index c7185fed8e95..6fc0edd101b9 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -116,8 +116,7 @@ public:
bool AllowModify = false) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
bool
@@ -133,10 +132,10 @@ public:
}
bool PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const override;
+ ArrayRef<MachineOperand> Pred) const override;
- bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const override;
+ bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+ ArrayRef<MachineOperand> Pred2) const override;
bool DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const override;
@@ -328,12 +327,12 @@ private:
int getInstrLatency(const InstrItineraryData *ItinData,
SDNode *Node) const override;
- bool hasHighOperandLatency(const InstrItineraryData *ItinData,
+ bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI,
unsigned UseIdx) const override;
- bool hasLowDefLatency(const InstrItineraryData *ItinData,
+ bool hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr *DefMI,
unsigned DefIdx) const override;
@@ -494,6 +493,6 @@ bool rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, int &Offset,
const ARMBaseInstrInfo &TII);
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/ARMCallingConv.h b/lib/Target/ARM/ARMCallingConv.h
index d687568d7eb9..2edb96adba42 100644
--- a/lib/Target/ARM/ARMCallingConv.h
+++ b/lib/Target/ARM/ARMCallingConv.h
@@ -281,6 +281,6 @@ static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned &ValNo, MVT &ValVT,
return true;
}
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp
index f4ec8c67c977..cb4eeb5fc43d 100644
--- a/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -335,7 +335,7 @@ namespace {
}
};
char ARMConstantIslands::ID = 0;
-}
+} // namespace
/// verify - check BBOffsets, BBSizes, alignment of islands
void ARMConstantIslands::verify() {
diff --git a/lib/Target/ARM/ARMConstantPoolValue.h b/lib/Target/ARM/ARMConstantPoolValue.h
index 36f63e239a9e..b429bed9ff25 100644
--- a/lib/Target/ARM/ARMConstantPoolValue.h
+++ b/lib/Target/ARM/ARMConstantPoolValue.h
@@ -44,7 +44,7 @@ namespace ARMCP {
GOTTPOFF,
TPOFF
};
-}
+} // namespace ARMCP
/// ARMConstantPoolValue - ARM specific constantpool value. This is used to
/// represent PC-relative displacement between the address of the load
@@ -254,6 +254,6 @@ public:
}
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 4438f50758dc..963b46c98e00 100644
--- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -69,7 +69,7 @@ namespace {
MachineBasicBlock::iterator &MBBI);
};
char ARMExpandPseudo::ID = 0;
-}
+} // namespace
/// TransferImpOps - Transfer implicit operands on the pseudo instruction to
/// the instructions created from the expansion.
@@ -129,7 +129,7 @@ namespace {
return PseudoOpc < TE.PseudoOpc;
}
};
-}
+} // namespace
static const NEONLdStTableEntry NEONLdStTable[] = {
{ ARM::VLD1LNq16Pseudo, ARM::VLD1LNd16, true, false, false, EvenDblSpc, 1, 4 ,true},
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 4175b4af86e6..cead18f97d74 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -2898,7 +2898,7 @@ const struct FoldableLoadExtendsStruct {
{ { ARM::SXTB, ARM::t2SXTB }, 0, 0, MVT::i8 },
{ { ARM::UXTB, ARM::t2UXTB }, 0, 1, MVT::i8 }
};
-}
+} // namespace
/// \brief The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
diff --git a/lib/Target/ARM/ARMFeatures.h b/lib/Target/ARM/ARMFeatures.h
index 0c910ab6130f..5b4a44c72030 100644
--- a/lib/Target/ARM/ARMFeatures.h
+++ b/lib/Target/ARM/ARMFeatures.h
@@ -92,6 +92,6 @@ inline bool isV8EligibleForIT(InstrType *Instr) {
}
}
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp
index a52e49780e27..091086d3c429 100644
--- a/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/lib/Target/ARM/ARMFrameLowering.cpp
@@ -221,7 +221,7 @@ struct StackAdjustingInsts {
}
}
};
-}
+} // namespace
/// Emit an instruction sequence that will align the address in
/// register Reg by zero-ing out the lower bits. For versions of the
diff --git a/lib/Target/ARM/ARMFrameLowering.h b/lib/Target/ARM/ARMFrameLowering.h
index d763d17a506f..98313e60e234 100644
--- a/lib/Target/ARM/ARMFrameLowering.h
+++ b/lib/Target/ARM/ARMFrameLowering.h
@@ -78,6 +78,6 @@ public:
MachineBasicBlock::iterator MI) const override;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 50afb192b331..575a9d930675 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -279,7 +279,7 @@ private:
SDValue GetVLDSTAlign(SDValue Align, SDLoc dl, unsigned NumVecs,
bool is64BitVector);
};
-}
+} // namespace
/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 47c8400a668f..94a026bf2cc8 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -83,7 +83,7 @@ namespace {
CallOrPrologue = PC;
}
};
-}
+} // namespace
// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
@@ -1483,9 +1483,10 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
bool isThisReturn = false;
bool isSibCall = false;
+ auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
// Disable tail calls if they're not supported.
- if (!Subtarget->supportsTailCall() || MF.getTarget().Options.DisableTailCalls)
+ if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
isTailCall = false;
if (isTailCall) {
@@ -2042,7 +2043,7 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// cannot rely on the linker replacing the tail call with a return.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
const GlobalValue *GV = G->getGlobal();
- const Triple TT(getTargetMachine().getTargetTriple());
+ const Triple &TT = getTargetMachine().getTargetTriple();
if (GV->hasExternalWeakLinkage() &&
(!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
return false;
@@ -2375,7 +2376,9 @@ bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
if (!Subtarget->supportsTailCall())
return false;
- if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
+ auto Attr =
+ CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
+ if (!CI->isTailCall() || Attr.getValueAsString() == "true")
return false;
return !Subtarget->isThumb1Only();
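Note: both call sites above now consult the per-function "disable-tail-calls" string attribute rather than the global TargetOptions flag. A small sketch of the query pattern (hypothetical helper; Function::getFnAttribute and Attribute::getValueAsString are the APIs used in the hunks above):

#include "llvm/IR/Function.h"

static bool tailCallsDisabled(const llvm::Function &F) {
  // getFnAttribute returns an empty Attribute when the key is absent,
  // so getValueAsString() yields "" and the comparison stays false.
  return F.getFnAttribute("disable-tail-calls").getValueAsString() == "true";
}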
@@ -5060,6 +5063,30 @@ static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
return true;
}
+/// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
+/// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
+static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
+ unsigned &WhichResult,
+ bool &isV_UNDEF) {
+ isV_UNDEF = false;
+ if (isVTRNMask(ShuffleMask, VT, WhichResult))
+ return ARMISD::VTRN;
+ if (isVUZPMask(ShuffleMask, VT, WhichResult))
+ return ARMISD::VUZP;
+ if (isVZIPMask(ShuffleMask, VT, WhichResult))
+ return ARMISD::VZIP;
+
+ isV_UNDEF = true;
+ if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
+ return ARMISD::VTRN;
+ if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
+ return ARMISD::VUZP;
+ if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
+ return ARMISD::VZIP;
+
+ return 0;
+}
+
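Note: the new helper folds six predicate checks into one dispatch. As a standalone sketch of the kind of mask it recognizes, here is the VTRN (transpose) case, the simplest of the three, written with plain containers rather than LLVM types; VZIP (interleave) and VUZP (de-interleave) masks follow the same shape:

#include <vector>

// For VTRN on N lanes, result R (0 or 1) uses the mask
//   { R, N+R, 2+R, N+2+R, ... }; entries of -1 mean "don't care".
static bool isTrnMask(const std::vector<int> &M, unsigned N, unsigned &R) {
  if (M.size() != N || N < 2 || (N & 1))
    return false;
  R = (M[0] == 0) ? 0 : 1;
  for (unsigned i = 0; i < N; i += 2) {
    if (M[i] >= 0 && unsigned(M[i]) != i + R)
      return false;
    if (M[i + 1] >= 0 && unsigned(M[i + 1]) != i + N + R)
      return false;
  }
  return true;
}
// e.g. {0,4,2,6} on N=4 matches with R=0; {1,5,3,7} matches with R=1.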
/// \return true if this is a reverse operation on a vector.
static bool isReverseMask(ArrayRef<int> M, EVT VT) {
unsigned NumElts = VT.getVectorNumElements();
@@ -5476,7 +5503,7 @@ ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
return true;
}
- bool ReverseVEXT;
+ bool ReverseVEXT, isV_UNDEF;
unsigned Imm, WhichResult;
unsigned EltSize = VT.getVectorElementType().getSizeInBits();
@@ -5487,12 +5514,7 @@ ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
isVREVMask(M, VT, 16) ||
isVEXTMask(M, VT, ReverseVEXT, Imm) ||
isVTBLMask(M, VT) ||
- isVTRNMask(M, VT, WhichResult) ||
- isVUZPMask(M, VT, WhichResult) ||
- isVZIPMask(M, VT, WhichResult) ||
- isVTRN_v_undef_Mask(M, VT, WhichResult) ||
- isVUZP_v_undef_Mask(M, VT, WhichResult) ||
- isVZIP_v_undef_Mask(M, VT, WhichResult) ||
+ isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF) ||
((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT)));
}
@@ -5684,25 +5706,53 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
// these operations, DAG memoization will ensure that a single node is
// used for both shuffles.
unsigned WhichResult;
- if (isVTRNMask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
- V1, V2).getValue(WhichResult);
- if (isVUZPMask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
- V1, V2).getValue(WhichResult);
- if (isVZIPMask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
- V1, V2).getValue(WhichResult);
-
- if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
- V1, V1).getValue(WhichResult);
- if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
- V1, V1).getValue(WhichResult);
- if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
- return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
- V1, V1).getValue(WhichResult);
+ bool isV_UNDEF;
+ if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
+ ShuffleMask, VT, WhichResult, isV_UNDEF)) {
+ if (isV_UNDEF)
+ V2 = V1;
+ return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
+ .getValue(WhichResult);
+ }
+
+ // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
+ // shuffles that produce a result larger than their operands with:
+ // shuffle(concat(v1, undef), concat(v2, undef))
+ // ->
+ // shuffle(concat(v1, v2), undef)
+ // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
+ //
+ // This is useful in the general case, but there are special cases where
+ // native shuffles produce larger results: the two-result ops.
+ //
+ // Look through the concat when lowering them:
+ // shuffle(concat(v1, v2), undef)
+ // ->
+ // concat(VZIP(v1, v2):0, :1)
+ //
+ if (V1->getOpcode() == ISD::CONCAT_VECTORS &&
+ V2->getOpcode() == ISD::UNDEF) {
+ SDValue SubV1 = V1->getOperand(0);
+ SDValue SubV2 = V1->getOperand(1);
+ EVT SubVT = SubV1.getValueType();
+
+ // We expect these to have been canonicalized to -1.
+ assert(std::all_of(ShuffleMask.begin(), ShuffleMask.end(), [&](int i) {
+ return i < (int)VT.getVectorNumElements();
+ }) && "Unexpected shuffle index into UNDEF operand!");
+
+ if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
+ ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
+ if (isV_UNDEF)
+ SubV2 = SubV1;
+ assert((WhichResult == 0) &&
+ "In-place shuffle of concat can only have one result!");
+ SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
+ SubV1, SubV2);
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
+ Res.getValue(1));
+ }
+ }
}
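Note: a worked instance of the rewrite described in the comment block above, using hypothetical 2-lane values and plain C++ rather than SelectionDAG nodes:

#include <array>
#include <cassert>

int main() {
  std::array<int, 2> v1{10, 11}, v2{20, 21};          // <a0,a1>, <b0,b1>
  std::array<int, 4> cat{v1[0], v1[1], v2[0], v2[1]}; // concat(v1, v2)

  // shuffle(concat(v1, v2), undef, <0,2,1,3>) asks for <a0,b0,a1,b1>.
  std::array<int, 4> mask{0, 2, 1, 3}, shuffled;
  for (int i = 0; i != 4; ++i)
    shuffled[i] = cat[mask[i]];

  // VZIP natively yields result 0 = <a0,b0> and result 1 = <a1,b1>,
  // so concat(VZIP(v1,v2):0, VZIP(v1,v2):1) is the same vector.
  std::array<int, 4> viaZip{v1[0], v2[0], v1[1], v2[1]};
  assert(shuffled == viaZip);
  return 0;
}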
// If the shuffle is not directly supported and it has 4 elements, use
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index c0b329c5a1e5..71a47a2cb81b 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -215,7 +215,7 @@ namespace llvm {
VST3LN_UPD,
VST4LN_UPD
};
- }
+ } // namespace ARMISD
/// Define some predicates that are used for node matching.
namespace ARM {
@@ -638,6 +638,6 @@ namespace llvm {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
}
-}
+} // namespace llvm
#endif // ARMISELLOWERING_H
diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp
index 84f95be30991..59e1535a6fe6 100644
--- a/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/lib/Target/ARM/ARMInstrInfo.cpp
@@ -198,7 +198,7 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
char ARMCGBR::ID = 0;
FunctionPass*
diff --git a/lib/Target/ARM/ARMInstrInfo.h b/lib/Target/ARM/ARMInstrInfo.h
index 90f34ea08401..9e5700a256bd 100644
--- a/lib/Target/ARM/ARMInstrInfo.h
+++ b/lib/Target/ARM/ARMInstrInfo.h
@@ -43,6 +43,6 @@ private:
Reloc::Model RM) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 46ff326ba630..50e2292b8b6e 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -142,7 +142,7 @@ namespace {
bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
};
char ARMLoadStoreOpt::ID = 0;
-}
+} // namespace
static bool definesCPSR(const MachineInstr *MI) {
for (const auto &MO : MI->operands()) {
@@ -1859,7 +1859,7 @@ namespace {
bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
};
char ARMPreAllocLoadStoreOpt::ID = 0;
-}
+} // namespace
bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
TD = Fn.getTarget().getDataLayout();
diff --git a/lib/Target/ARM/ARMMachineFunctionInfo.h b/lib/Target/ARM/ARMMachineFunctionInfo.h
index 14dd9ef333af..8b1210268eb2 100644
--- a/lib/Target/ARM/ARMMachineFunctionInfo.h
+++ b/lib/Target/ARM/ARMMachineFunctionInfo.h
@@ -229,6 +229,6 @@ public:
return It;
}
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/ARMOptimizeBarriersPass.cpp b/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
index 30baf4263c11..1c8e1f8b1412 100644
--- a/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
+++ b/lib/Target/ARM/ARMOptimizeBarriersPass.cpp
@@ -32,7 +32,7 @@ public:
}
};
char ARMOptimizeBarriersPass::ID = 0;
-}
+} // namespace
// Returns whether the instruction can safely move past a DMB instruction
// The current implementation allows this iff MI does not have any possible
diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.h b/lib/Target/ARM/ARMSelectionDAGInfo.h
index 1db190f41e1a..4563caae9ffe 100644
--- a/lib/Target/ARM/ARMSelectionDAGInfo.h
+++ b/lib/Target/ARM/ARMSelectionDAGInfo.h
@@ -70,6 +70,6 @@ public:
RTLIB::Libcall LC) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp
index f20318d133f4..55808dfb9efe 100644
--- a/lib/Target/ARM/ARMSubtarget.cpp
+++ b/lib/Target/ARM/ARMSubtarget.cpp
@@ -106,7 +106,7 @@ ARMFrameLowering *ARMSubtarget::initializeFrameLowering(StringRef CPU,
return new ARMFrameLowering(STI);
}
-ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
+ARMSubtarget::ARMSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS,
const ARMBaseTargetMachine &TM, bool IsLittle)
: ARMGenSubtargetInfo(TT, CPU, FS), ARMProcFamily(Others),
@@ -187,8 +187,7 @@ void ARMSubtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
// Insert the architecture feature derived from the target triple into the
// feature string. This is important for setting features that are implied
// based on the architecture version.
- std::string ArchFS =
- ARM_MC::ParseARMTriple(TargetTriple.getTriple(), CPUString);
+ std::string ArchFS = ARM_MC::ParseARMTriple(TargetTriple, CPUString);
if (!FS.empty()) {
if (!ArchFS.empty())
ArchFS = (Twine(ArchFS) + "," + FS).str();
@@ -338,7 +337,7 @@ bool ARMSubtarget::hasSinCos() const {
}
// This overrides the PostRAScheduler bit in the SchedModel for any CPU.
-bool ARMSubtarget::enablePostMachineScheduler() const {
+bool ARMSubtarget::enablePostRAScheduler() const {
return (!isThumb() || hasThumb2());
}
diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h
index 77ceb081db16..f00594f82012 100644
--- a/lib/Target/ARM/ARMSubtarget.h
+++ b/lib/Target/ARM/ARMSubtarget.h
@@ -237,8 +237,8 @@ public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
- ARMSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, const ARMBaseTargetMachine &TM, bool IsLittle);
+ ARMSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
+ const ARMBaseTargetMachine &TM, bool IsLittle);
/// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
/// that still makes it profitable to inline the call.
@@ -430,7 +430,7 @@ public:
bool hasSinCos() const;
/// True for some subtargets at > -O0.
- bool enablePostMachineScheduler() const override;
+ bool enablePostRAScheduler() const override;
// enableAtomicExpand- True if we need to expand our atomics.
bool enableAtomicExpand() const override;
@@ -453,6 +453,6 @@ public:
/// True if fast-isel is used.
bool useFastISel() const;
};
-} // End llvm namespace
+} // namespace llvm
#endif // ARMSUBTARGET_H
diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp
index 0aceaed87510..104a34f97e5e 100644
--- a/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/lib/Target/ARM/ARMTargetMachine.cpp
@@ -115,11 +115,10 @@ computeTargetABI(const Triple &TT, StringRef CPU,
return TargetABI;
}
-static std::string computeDataLayout(StringRef TT, StringRef CPU,
+static std::string computeDataLayout(const Triple &TT, StringRef CPU,
const TargetOptions &Options,
bool isLittle) {
- const Triple Triple(TT);
- auto ABI = computeTargetABI(Triple, CPU, Options);
+ auto ABI = computeTargetABI(TT, CPU, Options);
std::string Ret = "";
if (isLittle)
@@ -129,7 +128,7 @@ static std::string computeDataLayout(StringRef TT, StringRef CPU,
// Big endian.
Ret += "E";
- Ret += DataLayout::getManglingComponent(Triple);
+ Ret += DataLayout::getManglingComponent(TT);
// Pointers are 32 bits and aligned to 32 bits.
Ret += "-p:32:32";
@@ -159,7 +158,7 @@ static std::string computeDataLayout(StringRef TT, StringRef CPU,
// The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
// aligned everywhere else.
- if (Triple.isOSNaCl())
+ if (TT.isOSNaCl())
Ret += "-S128";
else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
Ret += "-S64";
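Note: a sketch of the stack-alignment suffix this branch appends to the datalayout string (hypothetical helper; the "-S32" default is taken from the comment above, since the hunk is cut before the final else):

#include <string>

std::string stackAlignSuffix(bool IsNaCl, bool IsAAPCS) {
  if (IsNaCl)  return "-S128"; // NaCl: 128-bit aligned stack
  if (IsAAPCS) return "-S64";  // AAPCS: 64-bit
  return "-S32";               // everywhere else: 32-bit
}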
@@ -171,15 +170,15 @@ static std::string computeDataLayout(StringRef TT, StringRef CPU,
/// TargetMachine ctor - Create an ARM architecture model.
///
-ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT,
+ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool isLittle)
: LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
CPU, FS, Options, RM, CM, OL),
- TargetABI(computeTargetABI(Triple(TT), CPU, Options)),
- TLOF(createTLOF(Triple(getTargetTriple()))),
+ TargetABI(computeTargetABI(TT, CPU, Options)),
+ TLOF(createTLOF(getTargetTriple())),
Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {
// Default to triple-appropriate float ABI
@@ -234,8 +233,9 @@ TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() {
void ARMTargetMachine::anchor() { }
-ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT, StringRef CPU,
- StringRef FS, const TargetOptions &Options,
+ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool isLittle)
: ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
@@ -247,7 +247,7 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT, StringRef CPU,
void ARMLETargetMachine::anchor() { }
-ARMLETargetMachine::ARMLETargetMachine(const Target &T, StringRef TT,
+ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
@@ -256,7 +256,7 @@ ARMLETargetMachine::ARMLETargetMachine(const Target &T, StringRef TT,
void ARMBETargetMachine::anchor() { }
-ARMBETargetMachine::ARMBETargetMachine(const Target &T, StringRef TT,
+ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
@@ -265,19 +265,18 @@ ARMBETargetMachine::ARMBETargetMachine(const Target &T, StringRef TT,
void ThumbTargetMachine::anchor() { }
-ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
+ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool isLittle)
- : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL,
- isLittle) {
+ : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
initAsmInfo();
}
void ThumbLETargetMachine::anchor() { }
-ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, StringRef TT,
+ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
@@ -286,7 +285,7 @@ ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, StringRef TT,
void ThumbBETargetMachine::anchor() { }
-ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, StringRef TT,
+ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
@@ -355,8 +354,7 @@ bool ARMPassConfig::addPreISel() {
bool ARMPassConfig::addInstSelector() {
addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
- if (Triple(TM->getTargetTriple()).isOSBinFormatELF() &&
- TM->Options.EnableFastISel)
+ if (TM->getTargetTriple().isOSBinFormatELF() && TM->Options.EnableFastISel)
addPass(createARMGlobalBaseRegPass());
return false;
}
diff --git a/lib/Target/ARM/ARMTargetMachine.h b/lib/Target/ARM/ARMTargetMachine.h
index 20ca97b616b7..8c98e082ce9a 100644
--- a/lib/Target/ARM/ARMTargetMachine.h
+++ b/lib/Target/ARM/ARMTargetMachine.h
@@ -36,12 +36,10 @@ protected:
mutable StringMap<std::unique_ptr<ARMSubtarget>> SubtargetMap;
public:
- ARMBaseTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
+ ARMBaseTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL,
- bool isLittle);
+ CodeGenOpt::Level OL, bool isLittle);
~ARMBaseTargetMachine() override;
const ARMSubtarget *getSubtargetImpl() const { return &Subtarget; }
@@ -64,8 +62,8 @@ public:
class ARMTargetMachine : public ARMBaseTargetMachine {
virtual void anchor();
public:
- ARMTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM,
+ ARMTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options, Reloc::Model RM,
CodeModel::Model CM, CodeGenOpt::Level OL, bool isLittle);
};
@@ -74,8 +72,8 @@ class ARMTargetMachine : public ARMBaseTargetMachine {
class ARMLETargetMachine : public ARMTargetMachine {
void anchor() override;
public:
- ARMLETargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
+ ARMLETargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
@@ -85,9 +83,10 @@ public:
class ARMBETargetMachine : public ARMTargetMachine {
void anchor() override;
public:
- ARMBETargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL);
+ ARMBETargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
};
/// ThumbTargetMachine - Thumb target machine.
@@ -97,9 +96,10 @@ public:
class ThumbTargetMachine : public ARMBaseTargetMachine {
virtual void anchor();
public:
- ThumbTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OL, bool isLittle);
+ ThumbTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL,
+ bool isLittle);
};
/// ThumbLETargetMachine - Thumb little endian target machine.
@@ -107,7 +107,7 @@ public:
class ThumbLETargetMachine : public ThumbTargetMachine {
void anchor() override;
public:
- ThumbLETargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ ThumbLETargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
@@ -118,7 +118,7 @@ public:
class ThumbBETargetMachine : public ThumbTargetMachine {
void anchor() override;
public:
- ThumbBETargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ ThumbBETargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 8bcbb1159f81..35387d3e6cf1 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -5841,7 +5841,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// do and don't have a cc_out optional-def operand. With some spot-checks
// of the operand list, we can figure out which variant we're trying to
// parse and adjust accordingly before actually matching. We shouldn't ever
- // try to remove a cc_out operand that was explicitly set on the the
+ // try to remove a cc_out operand that was explicitly set on the
// mnemonic, of course (CarrySetting == true). Reason number #317 the
// table driven matcher doesn't fit well with the ARM instruction set.
if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
diff --git a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
index 097ec04e7052..f973a8de8bcf 100644
--- a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -81,7 +81,7 @@ namespace {
private:
std::vector<unsigned char> ITStates;
};
-}
+} // namespace
namespace {
/// ARM disassembler for all ARM platforms.
@@ -118,7 +118,7 @@ private:
DecodeStatus AddThumbPredicate(MCInst&) const;
void UpdateThumbVFPPredicate(MCInst&) const;
};
-}
+} // namespace
static bool Check(DecodeStatus &Out, DecodeStatus In) {
switch (In) {
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index be23e9070103..111463588565 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -744,10 +744,9 @@ void ARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
}
MCAsmBackend *llvm::createARMAsmBackend(const Target &T,
- const MCRegisterInfo &MRI, StringRef TT,
- StringRef CPU, bool isLittle) {
- Triple TheTriple(TT);
-
+ const MCRegisterInfo &MRI,
+ const Triple &TheTriple, StringRef CPU,
+ bool isLittle) {
switch (TheTriple.getObjectFormat()) {
default:
llvm_unreachable("unsupported object format");
@@ -764,38 +763,38 @@ MCAsmBackend *llvm::createARMAsmBackend(const Target &T,
.Cases("armv7s", "thumbv7s", MachO::CPU_SUBTYPE_ARM_V7S)
.Default(MachO::CPU_SUBTYPE_ARM_V7);
- return new ARMAsmBackendDarwin(T, TT, CS);
+ return new ARMAsmBackendDarwin(T, TheTriple, CS);
}
case Triple::COFF:
assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
- return new ARMAsmBackendWinCOFF(T, TT);
+ return new ARMAsmBackendWinCOFF(T, TheTriple);
case Triple::ELF:
assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
- uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
- return new ARMAsmBackendELF(T, TT, OSABI, isLittle);
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
+ return new ARMAsmBackendELF(T, TheTriple, OSABI, isLittle);
}
}
MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
+ const Triple &TT, StringRef CPU) {
return createARMAsmBackend(T, MRI, TT, CPU, true);
}
MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
+ const Triple &TT, StringRef CPU) {
return createARMAsmBackend(T, MRI, TT, CPU, false);
}
MCAsmBackend *llvm::createThumbLEAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
+ const Triple &TT, StringRef CPU) {
return createARMAsmBackend(T, MRI, TT, CPU, true);
}
MCAsmBackend *llvm::createThumbBEAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
+ const Triple &TT, StringRef CPU) {
return createARMAsmBackend(T, MRI, TT, CPU, false);
}
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
index 4e6037213034..6b4abd5898eb 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
@@ -23,9 +23,10 @@ class ARMAsmBackend : public MCAsmBackend {
bool isThumbMode; // Currently emitting Thumb code.
bool IsLittleEndian; // Big or little endian.
public:
- ARMAsmBackend(const Target &T, StringRef TT, bool IsLittle)
+ ARMAsmBackend(const Target &T, const Triple &TT, bool IsLittle)
: MCAsmBackend(), STI(ARM_MC::createARMMCSubtargetInfo(TT, "", "")),
- isThumbMode(TT.startswith("thumb")), IsLittleEndian(IsLittle) {}
+ isThumbMode(TT.getArchName().startswith("thumb")),
+ IsLittleEndian(IsLittle) {}
~ARMAsmBackend() override { delete STI; }
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h
index ebef78937b5a..e28f6e097421 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendDarwin.h
@@ -18,7 +18,8 @@ namespace {
class ARMAsmBackendDarwin : public ARMAsmBackend {
public:
const MachO::CPUSubTypeARM Subtype;
- ARMAsmBackendDarwin(const Target &T, StringRef TT, MachO::CPUSubTypeARM st)
+ ARMAsmBackendDarwin(const Target &T, const Triple &TT,
+ MachO::CPUSubTypeARM st)
: ARMAsmBackend(T, TT, /* IsLittleEndian */ true), Subtype(st) {
HasDataInCodeSupport = true;
}
@@ -28,6 +29,6 @@ public:
Subtype);
}
};
-}
+} // namespace
#endif
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h
index 263c4c488acb..412feb8873ca 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendELF.h
@@ -15,13 +15,14 @@ namespace {
class ARMAsmBackendELF : public ARMAsmBackend {
public:
uint8_t OSABI;
- ARMAsmBackendELF(const Target &T, StringRef TT, uint8_t OSABI, bool IsLittle)
+ ARMAsmBackendELF(const Target &T, const Triple &TT, uint8_t OSABI,
+ bool IsLittle)
: ARMAsmBackend(T, TT, IsLittle), OSABI(OSABI) {}
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createARMELFObjectWriter(OS, OSABI, isLittle());
}
};
-}
+} // namespace
#endif
diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h
index f2c435820ad6..170f59a4c905 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackendWinCOFF.h
@@ -15,8 +15,8 @@ using namespace llvm;
namespace {
class ARMAsmBackendWinCOFF : public ARMAsmBackend {
public:
- ARMAsmBackendWinCOFF(const Target &T, StringRef Triple)
- : ARMAsmBackend(T, Triple, true) {}
+ ARMAsmBackendWinCOFF(const Target &T, const Triple &TheTriple)
+ : ARMAsmBackend(T, TheTriple, true) {}
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createARMWinCOFFObjectWriter(OS, /*Is64Bit=*/false);
}
diff --git a/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h b/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
index 4289a73e9d6b..1975bcaa234e 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
@@ -114,7 +114,7 @@ namespace ARM_PROC {
case ID: return "id";
}
}
-}
+} // namespace ARM_PROC
namespace ARM_MB {
// The Memory Barrier Option constants map directly to the 4-bit encoding of
@@ -459,6 +459,6 @@ namespace ARMII {
} // end namespace ARMII
-} // end namespace llvm;
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
index 804d3534096a..9fe27fbcff4a 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp
@@ -40,7 +40,7 @@ namespace {
bool needsRelocateWithSymbol(const MCSymbol &Sym,
unsigned Type) const override;
};
-}
+} // namespace
ARMELFObjectWriter::ARMELFObjectWriter(uint8_t OSABI)
: MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI,
diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index 6e3af739eca2..bbc0b37175df 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -1324,7 +1324,7 @@ MCTargetStreamer *createARMNullTargetStreamer(MCStreamer &S) {
MCTargetStreamer *createARMObjectTargetStreamer(MCStreamer &S,
const MCSubtargetInfo &STI) {
- Triple TT(STI.getTargetTriple());
+ const Triple &TT = STI.getTargetTriple();
if (TT.getObjectFormat() == Triple::ELF)
return new ARMTargetELFStreamer(S);
return new ARMTargetStreamer(S);
@@ -1345,6 +1345,6 @@ MCELFStreamer *createARMELFStreamer(MCContext &Context, MCAsmBackend &TAB,
return S;
}
-}
+} // namespace llvm
diff --git a/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h b/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
index 46ba57170db5..23ef50132900 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMFixupKinds.h
@@ -104,7 +104,7 @@ enum Fixups {
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
};
-}
-}
+} // namespace ARM
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index 84bb092fa286..b88578309f08 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -57,7 +57,7 @@ public:
return isThumb(STI) && STI.getFeatureBits()[ARM::FeatureThumb2];
}
bool isTargetMachO(const MCSubtargetInfo &STI) const {
- Triple TT(STI.getTargetTriple());
+ const Triple &TT = STI.getTargetTriple();
return TT.isOSBinFormatMachO();
}
@@ -1065,7 +1065,7 @@ ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
// it's just a plain immediate expression, previously those evaluated to
// the lower 16 bits of the expression regardless of whether
// we have a movt or a movw, but that led to misleading results.
- // This is now disallowed in the the AsmParser in validateInstruction()
+ // This is disallowed in the AsmParser in validateInstruction()
// so this should never happen.
llvm_unreachable("expression without :upper16: or :lower16:");
}
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
index 92c4d6a824ea..0fb395e473a6 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
@@ -130,16 +130,13 @@ static bool getARMLoadDeprecationInfo(MCInst &MI, MCSubtargetInfo &STI,
#define GET_SUBTARGETINFO_MC_DESC
#include "ARMGenSubtargetInfo.inc"
-
-std::string ARM_MC::ParseARMTriple(StringRef TT, StringRef CPU) {
- Triple triple(TT);
-
- bool isThumb = triple.getArch() == Triple::thumb ||
- triple.getArch() == Triple::thumbeb;
+std::string ARM_MC::ParseARMTriple(const Triple &TT, StringRef CPU) {
+ bool isThumb =
+ TT.getArch() == Triple::thumb || TT.getArch() == Triple::thumbeb;
bool NoCPU = CPU == "generic" || CPU.empty();
std::string ARMArchFeature;
- switch (triple.getSubArch()) {
+ switch (TT.getSubArch()) {
default:
llvm_unreachable("invalid sub-architecture for ARM");
case Triple::ARMSubArch_v8:
@@ -240,7 +237,7 @@ std::string ARM_MC::ParseARMTriple(StringRef TT, StringRef CPU) {
ARMArchFeature += ",+thumb-mode";
}
- if (triple.isOSNaCl()) {
+ if (TT.isOSNaCl()) {
if (ARMArchFeature.empty())
ARMArchFeature = "+nacl-trap";
else
@@ -250,8 +247,8 @@ std::string ARM_MC::ParseARMTriple(StringRef TT, StringRef CPU) {
return ARMArchFeature;
}
-MCSubtargetInfo *ARM_MC::createARMMCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
+MCSubtargetInfo *ARM_MC::createARMMCSubtargetInfo(const Triple &TT,
+ StringRef CPU, StringRef FS) {
std::string ArchFS = ARM_MC::ParseARMTriple(TT, CPU);
if (!FS.empty()) {
if (!ArchFS.empty())
@@ -332,10 +329,9 @@ static MCInstPrinter *createARMMCInstPrinter(const Triple &T,
return nullptr;
}
-static MCRelocationInfo *createARMMCRelocationInfo(StringRef TT,
+static MCRelocationInfo *createARMMCRelocationInfo(const Triple &TT,
MCContext &Ctx) {
- Triple TheTriple(TT);
- if (TheTriple.isOSBinFormatMachO())
+ if (TT.isOSBinFormatMachO())
return createARMMachORelocationInfo(Ctx);
// Default to the stock relocation info.
return llvm::createMCRelocationInfo(TT, Ctx);
@@ -374,7 +370,7 @@ public:
}
};
-}
+} // namespace
static MCInstrAnalysis *createARMMCInstrAnalysis(const MCInstrInfo *Info) {
return new ARMMCInstrAnalysis(Info);
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
index 24ca567a8124..c6f2d1341623 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h
@@ -40,12 +40,12 @@ extern Target TheARMLETarget, TheThumbLETarget;
extern Target TheARMBETarget, TheThumbBETarget;
namespace ARM_MC {
- std::string ParseARMTriple(StringRef TT, StringRef CPU);
+std::string ParseARMTriple(const Triple &TT, StringRef CPU);
- /// Create a ARM MCSubtargetInfo instance. This is exposed so Asm parser, etc.
- /// do not need to go through TargetRegistry.
- MCSubtargetInfo *createARMMCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS);
+/// Create an ARM MCSubtargetInfo instance. This is exposed so Asm parser, etc.
+/// do not need to go through TargetRegistry.
+MCSubtargetInfo *createARMMCSubtargetInfo(const Triple &TT, StringRef CPU,
+ StringRef FS);
}
MCTargetStreamer *createARMNullTargetStreamer(MCStreamer &S);
@@ -65,20 +65,22 @@ MCCodeEmitter *createARMBEMCCodeEmitter(const MCInstrInfo &MCII,
MCContext &Ctx);
MCAsmBackend *createARMAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU,
+ const Triple &TT, StringRef CPU,
bool IsLittleEndian);
MCAsmBackend *createARMLEAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+ const Triple &TT, StringRef CPU);
MCAsmBackend *createARMBEAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+ const Triple &TT, StringRef CPU);
-MCAsmBackend *createThumbLEAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+MCAsmBackend *createThumbLEAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU);
-MCAsmBackend *createThumbBEAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+MCAsmBackend *createThumbBEAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU);
// Construct a PE/COFF machine code streamer which will generate a PE/COFF
// object file.
@@ -101,7 +103,7 @@ MCObjectWriter *createARMWinCOFFObjectWriter(raw_pwrite_stream &OS,
/// Construct ARM Mach-O relocation info.
MCRelocationInfo *createARMMachORelocationInfo(MCContext &Ctx);
-} // End llvm namespace
+} // namespace llvm
// Defines symbolic names for ARM registers. This defines a mapping from
// register name to register number.
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
index 95d7ea7c04a3..6ac778e0cecd 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -56,7 +56,7 @@ public:
const MCFixup &Fixup, MCValue Target,
uint64_t &FixedValue) override;
};
-}
+} // namespace
static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType,
unsigned &Log2Size) {
diff --git a/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp b/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp
index 173cc93d44fb..32481e276b00 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMUnwindOpAsm.cpp
@@ -60,7 +60,7 @@ namespace {
EmitByte(ARM::EHABI::UNWIND_OPCODE_FINISH);
}
};
-}
+} // namespace
void UnwindOpcodeAssembler::EmitRegSave(uint32_t RegSave) {
if (RegSave == 0u)
diff --git a/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp
index 166c04b41a77..34b552f7a212 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMWinCOFFObjectWriter.cpp
@@ -79,7 +79,7 @@ unsigned ARMWinCOFFObjectWriter::getRelocType(const MCValue &Target,
bool ARMWinCOFFObjectWriter::recordRelocation(const MCFixup &Fixup) const {
return static_cast<unsigned>(Fixup.getKind()) != ARM::fixup_t2_movt_hi16;
}
-}
+} // namespace
namespace llvm {
MCObjectWriter *createARMWinCOFFObjectWriter(raw_pwrite_stream &OS,
diff --git a/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp
index b993b1be4847..6515a650be59 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMWinCOFFStreamer.cpp
@@ -35,7 +35,7 @@ void ARMWinCOFFStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
void ARMWinCOFFStreamer::EmitThumbFunc(MCSymbol *Symbol) {
getAssembler().setIsThumbFunc(Symbol);
}
-}
+} // namespace
MCStreamer *llvm::createARMWinCOFFStreamer(MCContext &Context,
MCAsmBackend &MAB,
diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp
index ed2deeaa24c0..ca98f696b7dd 100644
--- a/lib/Target/ARM/MLxExpansionPass.cpp
+++ b/lib/Target/ARM/MLxExpansionPass.cpp
@@ -71,7 +71,7 @@ namespace {
bool ExpandFPMLxInstructions(MachineBasicBlock &MBB);
};
char MLxExpansion::ID = 0;
-}
+} // namespace
void MLxExpansion::clearStack() {
std::fill(LastMIs, LastMIs + 4, nullptr);
diff --git a/lib/Target/ARM/Thumb1FrameLowering.h b/lib/Target/ARM/Thumb1FrameLowering.h
index 31d57325ebd6..e5e89fad3d71 100644
--- a/lib/Target/ARM/Thumb1FrameLowering.h
+++ b/lib/Target/ARM/Thumb1FrameLowering.h
@@ -47,6 +47,6 @@ public:
MachineBasicBlock::iterator MI) const override;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/Thumb1InstrInfo.h b/lib/Target/ARM/Thumb1InstrInfo.h
index f3f493d89237..31b4df2e5b0c 100644
--- a/lib/Target/ARM/Thumb1InstrInfo.h
+++ b/lib/Target/ARM/Thumb1InstrInfo.h
@@ -58,6 +58,6 @@ private:
void expandLoadStackGuard(MachineBasicBlock::iterator MI,
Reloc::Model RM) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/Thumb2ITBlockPass.cpp b/lib/Target/ARM/Thumb2ITBlockPass.cpp
index 68736bc1decd..7ce602d326cd 100644
--- a/lib/Target/ARM/Thumb2ITBlockPass.cpp
+++ b/lib/Target/ARM/Thumb2ITBlockPass.cpp
@@ -48,7 +48,7 @@ namespace {
bool InsertITInstructions(MachineBasicBlock &MBB);
};
char Thumb2ITBlockPass::ID = 0;
-}
+} // namespace
/// TrackDefUses - Tracking what registers are being defined and used by
/// instructions in the IT block. This also tracks "dependencies", i.e. uses
diff --git a/lib/Target/ARM/Thumb2InstrInfo.h b/lib/Target/ARM/Thumb2InstrInfo.h
index 916ab06ec305..d186dfb2ec91 100644
--- a/lib/Target/ARM/Thumb2InstrInfo.h
+++ b/lib/Target/ARM/Thumb2InstrInfo.h
@@ -73,6 +73,6 @@ private:
ARMCC::CondCodes getITInstrPredicate(const MachineInstr *MI, unsigned &PredReg);
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index d9ab824995c1..0dd1b4c15ef8 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -202,7 +202,7 @@ namespace {
std::function<bool(const Function &)> PredicateFtor;
};
char Thumb2SizeReduce::ID = 0;
-}
+} // namespace
Thumb2SizeReduce::Thumb2SizeReduce(std::function<bool(const Function &)> Ftor)
: MachineFunctionPass(ID), PredicateFtor(Ftor) {
diff --git a/lib/Target/ARM/ThumbRegisterInfo.h b/lib/Target/ARM/ThumbRegisterInfo.h
index 23aaff37f409..e55f88f53aec 100644
--- a/lib/Target/ARM/ThumbRegisterInfo.h
+++ b/lib/Target/ARM/ThumbRegisterInfo.h
@@ -60,6 +60,6 @@ public:
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/BPF/BPFAsmPrinter.cpp b/lib/Target/BPF/BPFAsmPrinter.cpp
index 10ec6587550b..9d0aa7a98a64 100644
--- a/lib/Target/BPF/BPFAsmPrinter.cpp
+++ b/lib/Target/BPF/BPFAsmPrinter.cpp
@@ -44,7 +44,7 @@ public:
const char *Modifier = nullptr);
void EmitInstruction(const MachineInstr *MI) override;
};
-}
+} // namespace
void BPFAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
raw_ostream &O, const char *Modifier) {
diff --git a/lib/Target/BPF/BPFFrameLowering.h b/lib/Target/BPF/BPFFrameLowering.h
index 3b9fc443e053..a6fe7c98115b 100644
--- a/lib/Target/BPF/BPFFrameLowering.h
+++ b/lib/Target/BPF/BPFFrameLowering.h
@@ -37,5 +37,5 @@ public:
MBB.erase(MI);
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/BPF/BPFISelDAGToDAG.cpp b/lib/Target/BPF/BPFISelDAGToDAG.cpp
index d9e654c76428..b49de3a27083 100644
--- a/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -51,7 +51,7 @@ private:
// Complex Pattern for address selection.
bool SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset);
};
-}
+} // namespace
// ComplexPattern used on BPF Load/Store instructions
bool BPFDAGToDAGISel::SelectAddr(SDValue Addr, SDValue &Base, SDValue &Offset) {
diff --git a/lib/Target/BPF/BPFISelLowering.cpp b/lib/Target/BPF/BPFISelLowering.cpp
index 38c56bbef81e..21d160d49946 100644
--- a/lib/Target/BPF/BPFISelLowering.cpp
+++ b/lib/Target/BPF/BPFISelLowering.cpp
@@ -86,7 +86,7 @@ public:
};
int DiagnosticInfoUnsupported::KindID = 0;
-}
+} // namespace
BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
const BPFSubtarget &STI)
diff --git a/lib/Target/BPF/BPFISelLowering.h b/lib/Target/BPF/BPFISelLowering.h
index ec71dca2faeb..b56bb39ca85d 100644
--- a/lib/Target/BPF/BPFISelLowering.h
+++ b/lib/Target/BPF/BPFISelLowering.h
@@ -85,6 +85,6 @@ private:
return true;
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/BPF/BPFInstrInfo.cpp b/lib/Target/BPF/BPFInstrInfo.cpp
index 28bd0ec6ebef..83d14efc1a6c 100644
--- a/lib/Target/BPF/BPFInstrInfo.cpp
+++ b/lib/Target/BPF/BPFInstrInfo.cpp
@@ -133,7 +133,7 @@ bool BPFInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
unsigned BPFInstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
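[Editor's sketch] The SmallVectorImpl-to-ArrayRef signature change above recurs across the targets below. Here is a minimal standalone sketch of why the view type is preferred; ArrayRefSketch is a toy stand-in for llvm::ArrayRef, not the real class.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // A non-owning (pointer, length) view can be built from any contiguous
    // container, so callees are no longer tied to one vector type.
    template <typename T> class ArrayRefSketch {
      const T *Data = nullptr;
      std::size_t Length = 0;

    public:
      ArrayRefSketch() = default;
      ArrayRefSketch(const std::vector<T> &V) : Data(V.data()), Length(V.size()) {}
      template <std::size_t N>
      ArrayRefSketch(const T (&A)[N]) : Data(A), Length(N) {}

      std::size_t size() const { return Length; }
      bool empty() const { return Length == 0; }
      const T &operator[](std::size_t I) const { return Data[I]; }
    };

    // One callee now serves vectors, plain arrays, and SmallVector-style
    // buffers without overloads.
    static int sum(ArrayRefSketch<int> Xs) {
      int Total = 0;
      for (std::size_t I = 0, E = Xs.size(); I != E; ++I)
        Total += Xs[I];
      return Total;
    }

    int main() {
      std::vector<int> V{1, 2, 3};
      int A[] = {4, 5};
      std::cout << sum(V) + sum(A) << "\n"; // prints 15
    }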
diff --git a/lib/Target/BPF/BPFInstrInfo.h b/lib/Target/BPF/BPFInstrInfo.h
index 4056c2efbbd0..bd96f76a8075 100644
--- a/lib/Target/BPF/BPFInstrInfo.h
+++ b/lib/Target/BPF/BPFInstrInfo.h
@@ -51,10 +51,9 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/BPF/BPFMCInstLower.h b/lib/Target/BPF/BPFMCInstLower.h
index 054e89407db2..ba9189792cbb 100644
--- a/lib/Target/BPF/BPFMCInstLower.h
+++ b/lib/Target/BPF/BPFMCInstLower.h
@@ -38,6 +38,6 @@ public:
MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/BPF/BPFRegisterInfo.h b/lib/Target/BPF/BPFRegisterInfo.h
index 7072dd0bde1a..44977a210959 100644
--- a/lib/Target/BPF/BPFRegisterInfo.h
+++ b/lib/Target/BPF/BPFRegisterInfo.h
@@ -35,6 +35,6 @@ struct BPFRegisterInfo : public BPFGenRegisterInfo {
unsigned getFrameRegister(const MachineFunction &MF) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/BPF/BPFSubtarget.cpp b/lib/Target/BPF/BPFSubtarget.cpp
index 7f7a26213154..65acd585116d 100644
--- a/lib/Target/BPF/BPFSubtarget.cpp
+++ b/lib/Target/BPF/BPFSubtarget.cpp
@@ -25,7 +25,7 @@ using namespace llvm;
void BPFSubtarget::anchor() {}
-BPFSubtarget::BPFSubtarget(const std::string &TT, const std::string &CPU,
+BPFSubtarget::BPFSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM)
: BPFGenSubtargetInfo(TT, CPU, FS), InstrInfo(), FrameLowering(*this),
TLInfo(TM, *this), TSInfo(TM.getDataLayout()) {}
diff --git a/lib/Target/BPF/BPFSubtarget.h b/lib/Target/BPF/BPFSubtarget.h
index 347cffd82e03..701ac577dd74 100644
--- a/lib/Target/BPF/BPFSubtarget.h
+++ b/lib/Target/BPF/BPFSubtarget.h
@@ -38,8 +38,8 @@ class BPFSubtarget : public BPFGenSubtargetInfo {
public:
// This constructor initializes the data members to match that
// of the specified triple.
- BPFSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, const TargetMachine &TM);
+ BPFSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
+ const TargetMachine &TM);
// ParseSubtargetFeatures - Parses features string setting specified
// subtarget options. Definition of function is auto generated by tblgen.
@@ -59,6 +59,6 @@ public:
return &InstrInfo.getRegisterInfo();
}
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/BPF/BPFTargetMachine.cpp b/lib/Target/BPF/BPFTargetMachine.cpp
index 3329d5f87409..5a888a955e33 100644
--- a/lib/Target/BPF/BPFTargetMachine.cpp
+++ b/lib/Target/BPF/BPFTargetMachine.cpp
@@ -29,19 +29,20 @@ extern "C" void LLVMInitializeBPFTarget() {
}
// DataLayout: little or big endian
-static std::string computeDataLayout(StringRef TT) {
- if (Triple(TT).getArch() == Triple::bpfeb)
+static std::string computeDataLayout(const Triple &TT) {
+ if (TT.getArch() == Triple::bpfeb)
return "E-m:e-p:64:64-i64:64-n32:64-S128";
else
return "e-m:e-p:64:64-i64:64-n32:64-S128";
}
-BPFTargetMachine::BPFTargetMachine(const Target &T, StringRef TT, StringRef CPU,
- StringRef FS, const TargetOptions &Options,
+BPFTargetMachine::BPFTargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS,
- Options, RM, CM, OL),
+ : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options, RM, CM,
+ OL),
TLOF(make_unique<TargetLoweringObjectFileELF>()),
Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
@@ -59,7 +60,7 @@ public:
bool addInstSelector() override;
};
-}
+} // namespace
TargetPassConfig *BPFTargetMachine::createPassConfig(PassManagerBase &PM) {
return new BPFPassConfig(this, PM);
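[Editor's sketch] computeDataLayout above now takes the parsed triple directly, and the two BPF layout strings differ only in the leading endianness marker ("E" big, "e" little). A standalone sketch of the same dispatch, with a toy Arch enum standing in for llvm::Triple::ArchType:

    #include <iostream>
    #include <string>

    enum class Arch { bpfel, bpfeb };

    // The rest of the layout (64-bit pointers, i64 alignment, 128-bit stack
    // alignment) is identical for both BPF variants.
    static std::string computeDataLayout(Arch A) {
      if (A == Arch::bpfeb)
        return "E-m:e-p:64:64-i64:64-n32:64-S128";
      return "e-m:e-p:64:64-i64:64-n32:64-S128";
    }

    int main() {
      std::cout << computeDataLayout(Arch::bpfel) << "\n";
      std::cout << computeDataLayout(Arch::bpfeb) << "\n";
    }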
diff --git a/lib/Target/BPF/BPFTargetMachine.h b/lib/Target/BPF/BPFTargetMachine.h
index 6aeafb99a2ad..c715fd5f0089 100644
--- a/lib/Target/BPF/BPFTargetMachine.h
+++ b/lib/Target/BPF/BPFTargetMachine.h
@@ -23,8 +23,8 @@ class BPFTargetMachine : public LLVMTargetMachine {
BPFSubtarget Subtarget;
public:
- BPFTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM,
+ BPFTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options, Reloc::Model RM,
CodeModel::Model CM, CodeGenOpt::Level OL);
const BPFSubtarget *getSubtargetImpl() const { return &Subtarget; }
@@ -38,6 +38,6 @@ public:
return TLOF.get();
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/BPF/InstPrinter/BPFInstPrinter.h b/lib/Target/BPF/InstPrinter/BPFInstPrinter.h
index adcaff686933..cb074713cce5 100644
--- a/lib/Target/BPF/InstPrinter/BPFInstPrinter.h
+++ b/lib/Target/BPF/InstPrinter/BPFInstPrinter.h
@@ -37,6 +37,6 @@ public:
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getRegisterName(unsigned RegNo);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/BPF/LLVMBuild.txt b/lib/Target/BPF/LLVMBuild.txt
index 11578c8ee21f..66dbf86fa427 100644
--- a/lib/Target/BPF/LLVMBuild.txt
+++ b/lib/Target/BPF/LLVMBuild.txt
@@ -28,5 +28,15 @@ has_asmprinter = 1
type = Library
name = BPFCodeGen
parent = BPF
-required_libraries = AsmPrinter CodeGen Core MC BPFAsmPrinter BPFDesc BPFInfo SelectionDAG Support Target
+required_libraries =
+ AsmPrinter
+ CodeGen
+ Core
+ MC
+ BPFAsmPrinter
+ BPFDesc
+ BPFInfo
+ SelectionDAG
+ Support
+ Target
add_to_library_groups = BPF
diff --git a/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
index 7b1d9259caf9..33aecb7b8ec3 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFAsmBackend.cpp
@@ -84,16 +84,16 @@ void BPFAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
MCObjectWriter *BPFAsmBackend::createObjectWriter(raw_pwrite_stream &OS) const {
return createBPFELFObjectWriter(OS, 0, IsLittleEndian);
}
-}
+} // namespace
MCAsmBackend *llvm::createBPFAsmBackend(const Target &T,
- const MCRegisterInfo &MRI, StringRef TT,
- StringRef CPU) {
+ const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU) {
return new BPFAsmBackend(/*IsLittleEndian=*/true);
}
MCAsmBackend *llvm::createBPFbeAsmBackend(const Target &T,
- const MCRegisterInfo &MRI, StringRef TT,
- StringRef CPU) {
+ const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU) {
return new BPFAsmBackend(/*IsLittleEndian=*/false);
}
diff --git a/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp b/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp
index 05ba6183e322..ef4f05f3d810 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFELFObjectWriter.cpp
@@ -25,7 +25,7 @@ protected:
unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
bool IsPCRel) const override;
};
-}
+} // namespace
BPFELFObjectWriter::BPFELFObjectWriter(uint8_t OSABI)
: MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_NONE,
diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h b/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h
index d63bbf49294e..22376543bd05 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h
+++ b/lib/Target/BPF/MCTargetDesc/BPFMCAsmInfo.h
@@ -36,6 +36,6 @@ public:
HasDotTypeDotSizeDirective = false;
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp b/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
index dc4ede30f191..b579afd690e9 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFMCCodeEmitter.cpp
@@ -58,7 +58,7 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const override;
};
-}
+} // namespace
MCCodeEmitter *llvm::createBPFMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp b/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp
index 7cedba90a746..3e928fc93a37 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp
+++ b/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.cpp
@@ -46,8 +46,8 @@ static MCRegisterInfo *createBPFMCRegisterInfo(StringRef TT) {
return X;
}
-static MCSubtargetInfo *createBPFMCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
+static MCSubtargetInfo *createBPFMCSubtargetInfo(const Triple &TT,
+ StringRef CPU, StringRef FS) {
MCSubtargetInfo *X = new MCSubtargetInfo();
InitBPFMCSubtargetInfo(X, TT, CPU, FS);
return X;
diff --git a/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h b/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h
index a9ba7d990e17..3d2583a11349 100644
--- a/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h
+++ b/lib/Target/BPF/MCTargetDesc/BPFMCTargetDesc.h
@@ -25,8 +25,9 @@ class MCInstrInfo;
class MCObjectWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
-class Target;
class StringRef;
+class Target;
+class Triple;
class raw_ostream;
class raw_pwrite_stream;
@@ -42,13 +43,13 @@ MCCodeEmitter *createBPFbeMCCodeEmitter(const MCInstrInfo &MCII,
MCContext &Ctx);
MCAsmBackend *createBPFAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+ const Triple &TT, StringRef CPU);
MCAsmBackend *createBPFbeAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+ const Triple &TT, StringRef CPU);
MCObjectWriter *createBPFELFObjectWriter(raw_pwrite_stream &OS,
uint8_t OSABI, bool IsLittleEndian);
-}
+} // namespace llvm
// Defines symbolic names for BPF registers. This defines a mapping from
// register name to register number.
diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp
index b8377986ecc0..9c9c097b4c3d 100644
--- a/lib/Target/CppBackend/CPPBackend.cpp
+++ b/lib/Target/CppBackend/CPPBackend.cpp
@@ -513,6 +513,7 @@ void CppWriter::printAttributes(const AttributeSet &PAL,
HANDLE_ATTR(StackProtect);
HANDLE_ATTR(StackProtectReq);
HANDLE_ATTR(StackProtectStrong);
+ HANDLE_ATTR(SafeStack);
HANDLE_ATTR(NoCapture);
HANDLE_ATTR(NoRedZone);
HANDLE_ATTR(NoImplicitFloat);
@@ -2148,7 +2149,8 @@ char CppWriter::ID = 0;
bool CPPTargetMachine::addPassesToEmitFile(
PassManagerBase &PM, raw_pwrite_stream &o, CodeGenFileType FileType,
- bool DisableVerify, AnalysisID StartAfter, AnalysisID StopAfter) {
+ bool DisableVerify, AnalysisID StartAfter, AnalysisID StopAfter,
+ MachineFunctionInitializer *MFInitializer) {
if (FileType != TargetMachine::CGFT_AssemblyFile)
return true;
auto FOut = llvm::make_unique<formatted_raw_ostream>(o);
diff --git a/lib/Target/CppBackend/CPPTargetMachine.h b/lib/Target/CppBackend/CPPTargetMachine.h
index 02d705e2d8f3..0cd20daa12fa 100644
--- a/lib/Target/CppBackend/CPPTargetMachine.h
+++ b/lib/Target/CppBackend/CPPTargetMachine.h
@@ -23,21 +23,21 @@ namespace llvm {
class formatted_raw_ostream;
struct CPPTargetMachine : public TargetMachine {
- CPPTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM,
+ CPPTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options, Reloc::Model RM,
CodeModel::Model CM, CodeGenOpt::Level OL)
: TargetMachine(T, "", TT, CPU, FS, Options) {}
public:
bool addPassesToEmitFile(PassManagerBase &PM, raw_pwrite_stream &Out,
CodeGenFileType FileType, bool DisableVerify,
- AnalysisID StartAfter,
- AnalysisID StopAfter) override;
+ AnalysisID StartAfter, AnalysisID StopAfter,
+ MachineFunctionInitializer *MFInitializer) override;
};
extern Target TheCppBackendTarget;
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
index 14f9d777580c..837838afc0f2 100644
--- a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
+++ b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp
@@ -53,7 +53,7 @@ public:
raw_ostream &VStream,
raw_ostream &CStream) const override;
};
-}
+} // namespace
static DecodeStatus DecodeModRegsRegisterClass(MCInst &Inst, unsigned RegNo,
uint64_t Address,
@@ -69,6 +69,33 @@ static unsigned GetSubinstOpcode(unsigned IClass, unsigned inst, unsigned &op,
raw_ostream &os);
static void AddSubinstOperands(MCInst *MI, unsigned opcode, unsigned inst);
+static DecodeStatus s16ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s12ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s11_0ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s11_1ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s11_2ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s11_3ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s10ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s8ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s6_0ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s4_0ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s4_1ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s4_2ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus s4_3ImmDecoder(MCInst &MI, unsigned tmp, uint64_t Address,
+ const void *Decoder);
+
static const uint16_t IntRegDecoderTable[] = {
Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
Hexagon::R5, Hexagon::R6, Hexagon::R7, Hexagon::R8, Hexagon::R9,
@@ -356,6 +383,97 @@ DecodeStatus HexagonDisassembler::getSingleInstruction(
return Result;
}
+static DecodeStatus s16ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<16>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s12ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<12>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s11_0ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<11>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s11_1ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<12>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s11_2ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<13>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s11_3ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<14>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s10ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<10>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s8ImmDecoder(MCInst &MI, unsigned tmp, uint64_t /*Address*/,
+ const void *Decoder) {
+ uint64_t imm = SignExtend64<8>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s6_0ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<6>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s4_0ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<4>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s4_1ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<5>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s4_2ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<6>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus s4_3ImmDecoder(MCInst &MI, unsigned tmp,
+ uint64_t /*Address*/, const void *Decoder) {
+ uint64_t imm = SignExtend64<7>(tmp);
+ MI.addOperand(MCOperand::createImm(imm));
+ return MCDisassembler::Success;
+}
+
// These values are from HexagonGenMCCodeEmitter.inc and HexagonIsetDx.td
enum subInstBinaryValues {
V4_SA1_addi_BITS = 0x0000,
diff --git a/lib/Target/Hexagon/Hexagon.h b/lib/Target/Hexagon/Hexagon.h
index 6e2ecaf57e49..b24d24a6d6f2 100644
--- a/lib/Target/Hexagon/Hexagon.h
+++ b/lib/Target/Hexagon/Hexagon.h
@@ -15,50 +15,6 @@
#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGON_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGON_H
-#include "MCTargetDesc/HexagonMCTargetDesc.h"
-#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
- class FunctionPass;
- class HexagonAsmPrinter;
- class HexagonTargetMachine;
- class MachineInstr;
- class MCInst;
- class ModulePass;
- class raw_ostream;
- class TargetMachine;
-
- FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM,
- CodeGenOpt::Level OptLevel);
- FunctionPass *createHexagonDelaySlotFillerPass(const TargetMachine &TM);
- FunctionPass *createHexagonFPMoverPass(const TargetMachine &TM);
- FunctionPass *createHexagonRemoveExtendArgs(const HexagonTargetMachine &TM);
- FunctionPass *createHexagonCFGOptimizer();
-
- FunctionPass *createHexagonSplitConst32AndConst64();
- FunctionPass *createHexagonExpandPredSpillCode();
- FunctionPass *createHexagonHardwareLoops();
- FunctionPass *createHexagonPeephole();
- FunctionPass *createHexagonFixupHwLoops();
- FunctionPass *createHexagonNewValueJump();
- FunctionPass *createHexagonCopyToCombine();
- FunctionPass *createHexagonPacketizer();
- FunctionPass *createHexagonNewValueJump();
-
-/* TODO: object output.
- MCCodeEmitter *createHexagonMCCodeEmitter(const Target &,
- const TargetMachine &TM,
- MCContext &Ctx);
-*/
-/* TODO: assembler input.
- TargetAsmBackend *createHexagonAsmBackend(const Target &,
- const std::string &);
-*/
- void HexagonLowerToMC(MachineInstr const *MI, MCInst &MCI,
- HexagonAsmPrinter &AP);
-} // end namespace llvm;
-
#define Hexagon_POINTER_SIZE 4
#define Hexagon_PointerSize (Hexagon_POINTER_SIZE)
@@ -75,7 +31,7 @@ namespace llvm {
// Maximum number of words and instructions in a packet.
#define HEXAGON_PACKET_SIZE 4
-
+#define HEXAGON_MAX_PACKET_SIZE (HEXAGON_PACKET_SIZE * HEXAGON_INSTR_SIZE)
// Minimum number of instructions in an end-loop packet.
#define HEXAGON_PACKET_INNER_SIZE 2
#define HEXAGON_PACKET_OUTER_SIZE 3
@@ -83,4 +39,25 @@ namespace llvm {
// including a compound one or a duplex or an extender.
#define HEXAGON_PRESHUFFLE_PACKET_SIZE (HEXAGON_PACKET_SIZE + 3)
+// Name of the global offset table as defined by the Hexagon ABI
+#define HEXAGON_GOT_SYM_NAME "_GLOBAL_OFFSET_TABLE_"
+
+#include "MCTargetDesc/HexagonMCTargetDesc.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+ class MachineInstr;
+ class MCInst;
+ class MCInstrInfo;
+ class HexagonAsmPrinter;
+ class HexagonTargetMachine;
+
+ void HexagonLowerToMC(const MachineInstr *MI, MCInst &MCI,
+ HexagonAsmPrinter &AP);
+
+ /// \brief Creates a Hexagon-specific Target Transformation Info pass.
+ ImmutablePass *createHexagonTargetTransformInfoPass(const HexagonTargetMachine *TM);
+} // namespace llvm
+
#endif
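[Editor's sketch] The new HEXAGON_MAX_PACKET_SIZE macro is byte arithmetic: instructions per packet times bytes per instruction. A compile-time restatement of that relation; the 4-byte HEXAGON_INSTR_SIZE is assumed from its use in the assembler backend later in this patch, since its definition is not in the hunk shown.

    // Assumed value: Hexagon instruction words are 4 bytes.
    #define HEXAGON_INSTR_SIZE 4
    #define HEXAGON_PACKET_SIZE 4
    #define HEXAGON_MAX_PACKET_SIZE (HEXAGON_PACKET_SIZE * HEXAGON_INSTR_SIZE)

    static_assert(HEXAGON_MAX_PACKET_SIZE == 16,
                  "a full Hexagon packet is four 4-byte instruction words");

    int main() { return 0; }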
diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.h b/lib/Target/Hexagon/HexagonAsmPrinter.h
index 792fc8b7af3a..f09a5b91fe8b 100755
--- a/lib/Target/Hexagon/HexagonAsmPrinter.h
+++ b/lib/Target/Hexagon/HexagonAsmPrinter.h
@@ -53,6 +53,6 @@ namespace llvm {
static const char *getRegisterName(unsigned RegNo);
};
-} // end of llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
index 703e691e612f..ff1a4fe30757 100644
--- a/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
+++ b/lib/Target/Hexagon/HexagonCFGOptimizer.cpp
@@ -28,6 +28,7 @@ using namespace llvm;
#define DEBUG_TYPE "hexagon_cfg"
namespace llvm {
+ FunctionPass *createHexagonCFGOptimizer();
void initializeHexagonCFGOptimizerPass(PassRegistry&);
}
@@ -227,7 +228,7 @@ bool HexagonCFGOptimizer::runOnMachineFunction(MachineFunction &Fn) {
}
return true;
}
-}
+} // namespace
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index 1d6455c66fa5..9fd863f6e153 100644
--- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -49,6 +49,7 @@ MaxNumOfInstsBetweenNewValueStoreAndTFR("max-num-inst-between-tfr-and-nv-store",
"consider the store still to be newifiable"));
namespace llvm {
+ FunctionPass *createHexagonCopyToCombine();
void initializeHexagonCopyToCombinePass(PassRegistry&);
}
diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 37ed173a79cd..33766dfb830c 100644
--- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -173,7 +173,7 @@ namespace {
bool coalesceRegisters(RegisterRef R1, RegisterRef R2);
bool coalesceSegments(MachineFunction &MF);
};
-}
+} // namespace
char HexagonExpandCondsets::ID = 0;
diff --git a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp b/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
index 40059fb27371..1657d88a4f43 100644
--- a/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
+++ b/lib/Target/Hexagon/HexagonExpandPredSpillCode.cpp
@@ -41,6 +41,7 @@ using namespace llvm;
namespace llvm {
+ FunctionPass *createHexagonExpandPredSpillCode();
void initializeHexagonExpandPredSpillCodePass(PassRegistry&);
}
@@ -332,7 +333,7 @@ bool HexagonExpandPredSpillCode::runOnMachineFunction(MachineFunction &Fn) {
return true;
}
-}
+} // namespace
//===----------------------------------------------------------------------===//
// Public Constructor Functions
diff --git a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
index 3d786a92b9e5..3ea77cdbb1f7 100644
--- a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
+++ b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
@@ -30,6 +30,7 @@ static cl::opt<unsigned> MaxLoopRange(
cl::desc("Restrict range of loopN instructions (testing only)"));
namespace llvm {
+ FunctionPass *createHexagonFixupHwLoops();
void initializeHexagonFixupHwLoopsPass(PassRegistry&);
}
@@ -66,7 +67,7 @@ namespace {
};
char HexagonFixupHwLoops::ID = 0;
-}
+} // namespace
INITIALIZE_PASS(HexagonFixupHwLoops, "hwloopsfixup",
"Hexagon Hardware Loops Fixup", false, false)
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 868f87e18413..9797134f41ad 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -238,7 +238,7 @@ namespace {
return true;
return false;
}
-}
+} // namespace
/// Implements shrink-wrapping of the stack frame. By default, stack frame
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.h b/lib/Target/Hexagon/HexagonFrameLowering.h
index 89500cb85724..767e13cbd6a6 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.h
+++ b/lib/Target/Hexagon/HexagonFrameLowering.h
@@ -99,6 +99,6 @@ private:
bool useRestoreFunction(MachineFunction &MF, const CSIVect &CSI) const;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index db72899388e5..53b6bf617e8f 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -63,6 +63,7 @@ static cl::opt<bool> HWCreatePreheader("hexagon-hwloop-preheader",
STATISTIC(NumHWLoops, "Number of loops converted to hardware loops");
namespace llvm {
+ FunctionPass *createHexagonHardwareLoops();
void initializeHexagonHardwareLoopsPass(PassRegistry&);
}
diff --git a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
index 7a213aad072c..9123057e60d1 100644
--- a/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
+++ b/lib/Target/Hexagon/HexagonISelDAGToDAG.cpp
@@ -104,6 +104,7 @@ public:
SDNode *SelectConstantFP(SDNode *N);
SDNode *SelectAdd(SDNode *N);
SDNode *SelectBitOp(SDNode *N);
+ bool isConstExtProfitable(SDNode *N) const;
// XformMskToBitPosU5Imm - Returns the bit position which
// the single bit 32 bit mask represents.
@@ -1327,6 +1328,20 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
return false;
}
+bool HexagonDAGToDAGISel::isConstExtProfitable(SDNode *N) const {
+ unsigned UseCount = 0;
+ unsigned CallCount = 0;
+ for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
+ // Count uses that are copies into registers (typically call-argument setup).
+ if (I->getOpcode() == ISD::CopyToReg)
+ ++CallCount;
+ UseCount++;
+ }
+
+ return (UseCount <= 1) || (CallCount > 1);
+
+}
+
void HexagonDAGToDAGISel::PreprocessISelDAG() {
SelectionDAG &DAG = *CurDAG;
std::vector<SDNode*> Nodes;
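[Editor's sketch] isConstExtProfitable above decides whether a constant-extended immediate pays off: a single use is always acceptable, and multiple uses are tolerated only when more than one of them is a CopyToReg (typically call-argument setup). A standalone sketch of the same heuristic over a toy use list; the opcode enum is a placeholder, not the real ISD enumeration.

    #include <cassert>
    #include <vector>

    enum Opcode { CopyToReg, Other };

    static bool isConstExtProfitable(const std::vector<Opcode> &Uses) {
      unsigned UseCount = 0, CallCount = 0;
      for (Opcode Op : Uses) {
        if (Op == CopyToReg) // uses that feed register copies
          ++CallCount;
        ++UseCount;
      }
      // Profitable if the constant is used once, or shared mostly by copies.
      return (UseCount <= 1) || (CallCount > 1);
    }

    int main() {
      assert(isConstExtProfitable({Other}));                // single use
      assert(!isConstExtProfitable({Other, Other}));        // shared, no copies
      assert(isConstExtProfitable({CopyToReg, CopyToReg})); // two register copies
      return 0;
    }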
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index 74d92aef25ac..1a14c88f04fd 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -95,7 +95,7 @@ public:
unsigned getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
};
-}
+} // namespace
// Implement calling convention for Hexagon.
static bool
@@ -397,7 +397,9 @@ HexagonTargetLowering::LowerReturn(SDValue Chain,
bool HexagonTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
// If either no tail call or told not to tail call at all, don't.
- if (!CI->isTailCall() || HTM.Options.DisableTailCalls)
+ auto Attr =
+ CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
+ if (!CI->isTailCall() || Attr.getValueAsString() == "true")
return false;
return true;
@@ -486,7 +488,8 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
else
CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
- if (DAG.getTarget().Options.DisableTailCalls)
+ auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
+ if (Attr.getValueAsString() == "true")
isTailCall = false;
if (isTailCall) {
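[Editor's sketch] The two hunks above replace the global TargetOptions::DisableTailCalls flag with a per-function "disable-tail-calls" string attribute, looked up on the calling function at each call site. A standalone sketch of that lookup; a plain map stands in for the function's attribute list.

    #include <cassert>
    #include <map>
    #include <string>

    using FnAttrs = std::map<std::string, std::string>;

    // A tail call survives only if the function has not opted out via the
    // "disable-tail-calls" attribute with value "true".
    static bool mayEmitTailCall(bool IsTailCall, const FnAttrs &Attrs) {
      auto It = Attrs.find("disable-tail-calls");
      bool Disabled = It != Attrs.end() && It->second == "true";
      return IsTailCall && !Disabled;
    }

    int main() {
      assert(mayEmitTailCall(true, {}));                                // allowed
      assert(!mayEmitTailCall(true, {{"disable-tail-calls", "true"}})); // opted out
      assert(!mayEmitTailCall(false, {}));                              // not a tail call
      return 0;
    }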
diff --git a/lib/Target/Hexagon/HexagonISelLowering.h b/lib/Target/Hexagon/HexagonISelLowering.h
index b80e8477eb7b..b9d18df05b54 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/lib/Target/Hexagon/HexagonISelLowering.h
@@ -86,7 +86,7 @@ bool isPositiveHalfWord(SDNode *N);
OP_END
};
- }
+ } // namespace HexagonISD
class HexagonSubtarget;
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index e566a97789a9..3cb082349b41 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -159,7 +159,7 @@ findLoopInstr(MachineBasicBlock *BB, int EndLoopOp,
unsigned HexagonInstrInfo::InsertBranch(
MachineBasicBlock &MBB,MachineBasicBlock *TBB, MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
+ ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
Opcode_t BOpc = Hexagon::J2_jump;
Opcode_t BccOpc = Hexagon::J2_jumpt;
@@ -1013,7 +1013,7 @@ int HexagonInstrInfo::getCondOpcode(int Opc, bool invertPredicate) const {
bool HexagonInstrInfo::
PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Cond) const {
+ ArrayRef<MachineOperand> Cond) const {
if (Cond.empty() || isEndLoopN(Cond[0].getImm())) {
DEBUG(dbgs() << "\nCannot predicate:"; MI->dump(););
return false;
@@ -1162,8 +1162,8 @@ HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
bool
HexagonInstrInfo::
-SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const {
+SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+ ArrayRef<MachineOperand> Pred2) const {
// TODO: Fix this
return false;
}
@@ -1982,8 +1982,7 @@ bool HexagonInstrInfo::PredOpcodeHasJMP_c(Opcode_t Opcode) const {
(Opcode == Hexagon::J2_jumpf);
}
-bool HexagonInstrInfo::predOpcodeHasNot(
- const SmallVectorImpl<MachineOperand> &Cond) const {
+bool HexagonInstrInfo::predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const {
if (Cond.empty() || !isPredicated(Cond[0].getImm()))
return false;
return !isPredicatedTrue(Cond[0].getImm());
@@ -1994,7 +1993,7 @@ bool HexagonInstrInfo::isEndLoopN(Opcode_t Opcode) const {
Opcode == Hexagon::ENDLOOP1);
}
-bool HexagonInstrInfo::getPredReg(const SmallVectorImpl<MachineOperand> &Cond,
+bool HexagonInstrInfo::getPredReg(ArrayRef<MachineOperand> Cond,
unsigned &PredReg, unsigned &PredRegPos,
unsigned &PredRegFlags) const {
if (Cond.empty())
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
index a7ae65e4eb9c..91f508ee5ecf 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -69,8 +69,7 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
bool analyzeCompare(const MachineInstr *MI,
@@ -129,7 +128,7 @@ public:
bool isBranch(const MachineInstr *MI) const;
bool isPredicable(MachineInstr *MI) const override;
bool PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Cond) const override;
+ ArrayRef<MachineOperand> Cond) const override;
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
unsigned ExtraPredCycles,
@@ -149,8 +148,8 @@ public:
bool isPredicatedNew(unsigned Opcode) const;
bool DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const override;
- bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const override;
+ bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+ ArrayRef<MachineOperand> Pred2) const override;
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
@@ -222,15 +221,14 @@ public:
bool NonExtEquivalentExists (const MachineInstr *MI) const;
short getNonExtOpcode(const MachineInstr *MI) const;
bool PredOpcodeHasJMP_c(Opcode_t Opcode) const;
- bool predOpcodeHasNot(const SmallVectorImpl<MachineOperand> &Cond) const;
+ bool predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const;
bool isEndLoopN(Opcode_t Opcode) const;
- bool getPredReg(const SmallVectorImpl<MachineOperand> &Cond,
- unsigned &PredReg, unsigned &PredRegPos,
- unsigned &PredRegFlags) const;
+ bool getPredReg(ArrayRef<MachineOperand> Cond, unsigned &PredReg,
+ unsigned &PredRegPos, unsigned &PredRegFlags) const;
int getCondOpcode(int Opc, bool sense) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Hexagon/HexagonIntrinsics.td b/lib/Target/Hexagon/HexagonIntrinsics.td
index 4275230ba717..1d0d015f798b 100644
--- a/lib/Target/Hexagon/HexagonIntrinsics.td
+++ b/lib/Target/Hexagon/HexagonIntrinsics.td
@@ -676,6 +676,7 @@ def : Pat <(int_hexagon_A2_tfrih (I32:$Rs), u16_0ImmPred:$Is),
// Transfer Register/immediate.
def : T_R_pat <A2_tfr, int_hexagon_A2_tfr>;
def : T_I_pat <A2_tfrsi, int_hexagon_A2_tfrsi>;
+def : T_I_pat <A2_tfrpi, int_hexagon_A2_tfrpi>;
// Assembler mapped from Rdd32=Rss32 to Rdd32=combine(Rss.H32,Rss.L32)
def : Pat<(int_hexagon_A2_tfrp DoubleRegs:$src),
@@ -690,15 +691,15 @@ def: T_RR_pat<A2_combine_hl, int_hexagon_A2_combine_hl>;
def: T_RR_pat<A2_combine_lh, int_hexagon_A2_combine_lh>;
def: T_RR_pat<A2_combine_ll, int_hexagon_A2_combine_ll>;
-def: T_II_pat<A2_combineii, int_hexagon_A2_combineii, s32ImmPred, s8ImmPred>;
+def: T_II_pat<A2_combineii, int_hexagon_A2_combineii, s8ExtPred, s8ImmPred>;
def: Pat<(i32 (int_hexagon_C2_mux (I32:$Rp), (I32:$Rs), (I32:$Rt))),
(i32 (C2_mux (C2_tfrrp IntRegs:$Rp), IntRegs:$Rs, IntRegs:$Rt))>;
// Mux
-def : T_QRI_pat<C2_muxir, int_hexagon_C2_muxir, s32ImmPred>;
-def : T_QIR_pat<C2_muxri, int_hexagon_C2_muxri, s32ImmPred>;
-def : T_QII_pat<C2_muxii, int_hexagon_C2_muxii, s32ImmPred, s8ImmPred>;
+def : T_QRI_pat<C2_muxir, int_hexagon_C2_muxir, s8ExtPred>;
+def : T_QIR_pat<C2_muxri, int_hexagon_C2_muxri, s8ExtPred>;
+def : T_QII_pat<C2_muxii, int_hexagon_C2_muxii, s8ExtPred, s8ImmPred>;
// Shift halfword
def : T_R_pat<A2_aslh, int_hexagon_A2_aslh>;
@@ -719,17 +720,17 @@ def : T_RR_pat<C2_cmpeq, int_hexagon_C2_cmpeq>;
def : T_RR_pat<C2_cmpgt, int_hexagon_C2_cmpgt>;
def : T_RR_pat<C2_cmpgtu, int_hexagon_C2_cmpgtu>;
-def : T_RI_pat<C2_cmpeqi, int_hexagon_C2_cmpeqi, s32ImmPred>;
-def : T_RI_pat<C2_cmpgti, int_hexagon_C2_cmpgti, s32ImmPred>;
-def : T_RI_pat<C2_cmpgtui, int_hexagon_C2_cmpgtui, u32ImmPred>;
+def : T_RI_pat<C2_cmpeqi, int_hexagon_C2_cmpeqi, s10ExtPred>;
+def : T_RI_pat<C2_cmpgti, int_hexagon_C2_cmpgti, s10ExtPred>;
+def : T_RI_pat<C2_cmpgtui, int_hexagon_C2_cmpgtui, u9ExtPred>;
-def : Pat <(i32 (int_hexagon_C2_cmpgei (I32:$src1), s32ImmPred:$src2)),
+def : Pat <(i32 (int_hexagon_C2_cmpgei (I32:$src1), s8ExtPred:$src2)),
(i32 (C2_cmpgti (I32:$src1),
- (DEC_CONST_SIGNED s32ImmPred:$src2)))>;
+ (DEC_CONST_SIGNED s8ExtPred:$src2)))>;
-def : Pat <(i32 (int_hexagon_C2_cmpgeui (I32:$src1), u32ImmPred:$src2)),
+def : Pat <(i32 (int_hexagon_C2_cmpgeui (I32:$src1), u8ExtPred:$src2)),
(i32 (C2_cmpgtui (I32:$src1),
- (DEC_CONST_UNSIGNED u32ImmPred:$src2)))>;
+ (DEC_CONST_UNSIGNED u8ExtPred:$src2)))>;
// The instruction, Pd=cmp.geu(Rs, #u8) -> Pd=cmp.eq(Rs,Rs) when #u8 == 0.
def : Pat <(i32 (int_hexagon_C2_cmpgeui (I32:$src1), 0)),
@@ -923,6 +924,10 @@ def: qi_CRInst_qiqi_pat<C2_or, int_hexagon_C2_or>;
def: qi_CRInst_qiqi_pat<C2_orn, int_hexagon_C2_orn>;
def: qi_CRInst_qiqi_pat<C2_xor, int_hexagon_C2_xor>;
+// Assembler mapped from Pd4=Ps4 to Pd4=or(Ps4,Ps4)
+def : Pat<(int_hexagon_C2_pxfer_map PredRegs:$src),
+ (C2_pxfer_map PredRegs:$src)>;
+
// Multiply 32x32 and use lower result
def : T_RRI_pat <M2_macsip, int_hexagon_M2_macsip>;
def : T_RRI_pat <M2_macsin, int_hexagon_M2_macsin>;
diff --git a/lib/Target/Hexagon/HexagonMachineFunctionInfo.h b/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
index 76723586c66e..5681ae29831f 100644
--- a/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
+++ b/lib/Target/Hexagon/HexagonMachineFunctionInfo.h
@@ -80,6 +80,6 @@ public:
void setStackAlignBaseVReg(unsigned R) { StackAlignBaseReg = R; }
unsigned getStackAlignBaseVReg() const { return StackAlignBaseReg; }
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.h b/lib/Target/Hexagon/HexagonMachineScheduler.h
index 60343442e327..fae16e2a0612 100644
--- a/lib/Target/Hexagon/HexagonMachineScheduler.h
+++ b/lib/Target/Hexagon/HexagonMachineScheduler.h
@@ -238,7 +238,7 @@ protected:
#endif
};
-} // namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp
index 81af4db912cc..707bfdbb6ab6 100644
--- a/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -60,6 +60,7 @@ static cl::opt<bool> DisableNewValueJumps("disable-nvjump", cl::Hidden,
cl::desc("Disable New Value Jumps"));
namespace llvm {
+ FunctionPass *createHexagonNewValueJump();
void initializeHexagonNewValueJumpPass(PassRegistry&);
}
diff --git a/lib/Target/Hexagon/HexagonOperands.td b/lib/Target/Hexagon/HexagonOperands.td
index be8204b7de53..2bece8f42f53 100644
--- a/lib/Target/Hexagon/HexagonOperands.td
+++ b/lib/Target/Hexagon/HexagonOperands.td
@@ -7,32 +7,24 @@
//
//===----------------------------------------------------------------------===//
+def s4_0ImmOperand : AsmOperandClass { let Name = "s4_0Imm"; }
+def s4_1ImmOperand : AsmOperandClass { let Name = "s4_1Imm"; }
+def s4_2ImmOperand : AsmOperandClass { let Name = "s4_2Imm"; }
+def s4_3ImmOperand : AsmOperandClass { let Name = "s4_3Imm"; }
+
// Immediate operands.
let PrintMethod = "printImmOperand" in {
- // f32Ext type is used to identify constant extended floating point immediates.
- def f32Ext : Operand<f32>;
def s32Imm : Operand<i32>;
- def s26_6Imm : Operand<i32>;
- def s16Imm : Operand<i32>;
- def s12Imm : Operand<i32>;
- def s11Imm : Operand<i32>;
- def s11_0Imm : Operand<i32>;
- def s11_1Imm : Operand<i32>;
- def s11_2Imm : Operand<i32>;
- def s11_3Imm : Operand<i32>;
- def s10Imm : Operand<i32>;
- def s9Imm : Operand<i32>;
- def m9Imm : Operand<i32>;
def s8Imm : Operand<i32>;
def s8Imm64 : Operand<i64>;
def s6Imm : Operand<i32>;
def s6_3Imm : Operand<i32>;
def s4Imm : Operand<i32>;
- def s4_0Imm : Operand<i32>;
- def s4_1Imm : Operand<i32>;
- def s4_2Imm : Operand<i32>;
- def s4_3Imm : Operand<i32>;
+ def s4_0Imm : Operand<i32> { let DecoderMethod = "s4_0ImmDecoder"; }
+ def s4_1Imm : Operand<i32> { let DecoderMethod = "s4_1ImmDecoder"; }
+ def s4_2Imm : Operand<i32> { let DecoderMethod = "s4_2ImmDecoder"; }
+ def s4_3Imm : Operand<i32> { let DecoderMethod = "s4_3ImmDecoder"; }
def u64Imm : Operand<i64>;
def u32Imm : Operand<i32>;
def u26_6Imm : Operand<i32>;
@@ -446,17 +438,18 @@ def SetClr3ImmPred : PatLeaf<(i32 imm), [{
// Extendable immediate operands.
let PrintMethod = "printExtOperand" in {
- def s16Ext : Operand<i32>;
- def s12Ext : Operand<i32>;
- def s10Ext : Operand<i32>;
- def s9Ext : Operand<i32>;
- def s8Ext : Operand<i32>;
+ def f32Ext : Operand<f32>;
+ def s16Ext : Operand<i32> { let DecoderMethod = "s16ImmDecoder"; }
+ def s12Ext : Operand<i32> { let DecoderMethod = "s12ImmDecoder"; }
+ def s11_0Ext : Operand<i32> { let DecoderMethod = "s11_0ImmDecoder"; }
+ def s11_1Ext : Operand<i32> { let DecoderMethod = "s11_1ImmDecoder"; }
+ def s11_2Ext : Operand<i32> { let DecoderMethod = "s11_2ImmDecoder"; }
+ def s11_3Ext : Operand<i32> { let DecoderMethod = "s11_3ImmDecoder"; }
+ def s10Ext : Operand<i32> { let DecoderMethod = "s10ImmDecoder"; }
+ def s9Ext : Operand<i32> { let DecoderMethod = "s90ImmDecoder"; }
+ def s8Ext : Operand<i32> { let DecoderMethod = "s8ImmDecoder"; }
def s7Ext : Operand<i32>;
- def s6Ext : Operand<i32>;
- def s11_0Ext : Operand<i32>;
- def s11_1Ext : Operand<i32>;
- def s11_2Ext : Operand<i32>;
- def s11_3Ext : Operand<i32>;
+ def s6Ext : Operand<i32> { let DecoderMethod = "s6_0ImmDecoder"; }
def u6Ext : Operand<i32>;
def u7Ext : Operand<i32>;
def u8Ext : Operand<i32>;
@@ -468,6 +461,46 @@ let PrintMethod = "printExtOperand" in {
def u6_3Ext : Operand<i32>;
}
+def s10ExtPred : PatLeaf<(i32 imm), [{
+ int64_t v = (int64_t)N->getSExtValue();
+ if (isInt<10>(v))
+ return true;
+
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
+}]>;
+
+def s8ExtPred : PatLeaf<(i32 imm), [{
+ int64_t v = (int64_t)N->getSExtValue();
+ if (isInt<8>(v))
+ return true;
+
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit signed field.
+ return isConstExtProfitable(Node) && isInt<32>(v);
+}]>;
+
+def u8ExtPred : PatLeaf<(i32 imm), [{
+ int64_t v = (int64_t)N->getSExtValue();
+ if (isUInt<8>(v))
+ return true;
+
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v);
+}]>;
+
+def u9ExtPred : PatLeaf<(i32 imm), [{
+ int64_t v = (int64_t)N->getSExtValue();
+ if (isUInt<9>(v))
+ return true;
+
+ // Return true if extending this immediate is profitable and the value
+ // can fit in a 32-bit unsigned field.
+ return isConstExtProfitable(Node) && isUInt<32>(v);
+}]>;
+
// This complex pattern exists only to create a machine instruction operand
// of type "frame index". There doesn't seem to be a way to do that directly
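[Editor's sketch] Each new sNExtPred above accepts an immediate either because it already fits the native N-bit signed field, or because constant extension is profitable and the value fits the 32-bit extension word (the uNExtPred variants test unsigned ranges the same way). A standalone sketch of the signed case; extProfitable stands in for the isConstExtProfitable(Node) hook used in the predicates.

    #include <cassert>
    #include <cstdint>

    static bool fitsSignedBits(int64_t V, unsigned Bits) {
      int64_t Lo = -(int64_t(1) << (Bits - 1));
      int64_t Hi = (int64_t(1) << (Bits - 1)) - 1;
      return V >= Lo && V <= Hi;
    }

    // Matches the two-step test in s8ExtPred/s10ExtPred: native fit first,
    // then profitable extension into a 32-bit signed field.
    static bool matchesExtPred(int64_t V, unsigned NativeBits,
                               bool ExtProfitable) {
      if (fitsSignedBits(V, NativeBits))
        return true;
      return ExtProfitable && fitsSignedBits(V, 32);
    }

    int main() {
      assert(matchesExtPred(100, 8, false));   // fits s8 natively
      assert(!matchesExtPred(1000, 8, false)); // needs extension, not profitable
      assert(matchesExtPred(1000, 8, true));   // extended constant, fits 32 bits
      return 0;
    }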
diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp
index 503bfdb6b3eb..94ec2e7ca6c1 100644
--- a/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -75,6 +75,7 @@ static cl::opt<bool> DisableOptExtTo64("disable-hexagon-opt-ext-to-64",
cl::desc("Disable Optimization of extensions to i64."));
namespace llvm {
+ FunctionPass *createHexagonPeephole();
void initializeHexagonPeepholePass(PassRegistry&);
}
@@ -103,7 +104,7 @@ namespace {
private:
void ChangeOpInto(MachineOperand &Dst, MachineOperand &Src);
};
-}
+} // namespace
char HexagonPeephole::ID = 0;
diff --git a/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp b/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
index 0c2407508869..d586c395a9ad 100644
--- a/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
+++ b/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp
@@ -24,6 +24,7 @@
using namespace llvm;
namespace llvm {
+ FunctionPass *createHexagonRemoveExtendArgs(const HexagonTargetMachine &TM);
void initializeHexagonRemoveExtendArgsPass(PassRegistry&);
}
@@ -47,7 +48,7 @@ namespace {
FunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
char HexagonRemoveExtendArgs::ID = 0;
diff --git a/lib/Target/Hexagon/HexagonSelectionDAGInfo.h b/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
index 8ac2e43f9294..c72051ca1348 100644
--- a/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
+++ b/lib/Target/Hexagon/HexagonSelectionDAGInfo.h
@@ -32,6 +32,6 @@ public:
MachinePointerInfo SrcPtrInfo) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp b/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
index 4efb5f75af62..61bb7c5139e4 100644
--- a/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
+++ b/lib/Target/Hexagon/HexagonSplitConst32AndConst64.cpp
@@ -45,6 +45,11 @@ using namespace llvm;
#define DEBUG_TYPE "xfer"
+namespace llvm {
+ FunctionPass *createHexagonSplitConst32AndConst64();
+ void initializeHexagonSplitConst32AndConst64Pass(PassRegistry&);
+}
+
namespace {
class HexagonSplitConst32AndConst64 : public MachineFunctionPass {
@@ -151,7 +156,7 @@ bool HexagonSplitConst32AndConst64::runOnMachineFunction(MachineFunction &Fn) {
return true;
}
-}
+} // namespace
//===----------------------------------------------------------------------===//
// Public Constructor Functions
diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp
index d61cc5418a4a..fe6c4f4298b5 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -70,8 +70,8 @@ HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
return *this;
}
-HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS,
- const TargetMachine &TM)
+HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetMachine &TM)
: HexagonGenSubtargetInfo(TT, CPU, FS), CPUString(CPU),
InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
TSInfo(*TM.getDataLayout()), FrameLowering() {
diff --git a/lib/Target/Hexagon/HexagonSubtarget.h b/lib/Target/Hexagon/HexagonSubtarget.h
index 780567bcd36b..34cdad786f82 100644
--- a/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/lib/Target/Hexagon/HexagonSubtarget.h
@@ -52,7 +52,7 @@ private:
InstrItineraryData InstrItins;
public:
- HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS,
+ HexagonSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
const TargetMachine &TM);
/// getInstrItins - Return the instruction itineraries based on subtarget
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 06798665cb05..90f1ced5420a 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -61,14 +61,30 @@ SchedCustomRegistry("hexagon", "Run Hexagon's custom scheduler",
namespace llvm {
FunctionPass *createHexagonExpandCondsets();
-}
+ FunctionPass *createHexagonISelDag(HexagonTargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
+ FunctionPass *createHexagonDelaySlotFillerPass(const TargetMachine &TM);
+ FunctionPass *createHexagonFPMoverPass(const TargetMachine &TM);
+ FunctionPass *createHexagonRemoveExtendArgs(const HexagonTargetMachine &TM);
+ FunctionPass *createHexagonCFGOptimizer();
+
+ FunctionPass *createHexagonSplitConst32AndConst64();
+ FunctionPass *createHexagonExpandPredSpillCode();
+ FunctionPass *createHexagonHardwareLoops();
+ FunctionPass *createHexagonPeephole();
+ FunctionPass *createHexagonFixupHwLoops();
+ FunctionPass *createHexagonNewValueJump();
+ FunctionPass *createHexagonCopyToCombine();
+ FunctionPass *createHexagonPacketizer();
+ FunctionPass *createHexagonNewValueJump();
+} // namespace llvm
/// HexagonTargetMachine ctor - Create an ILP32 architecture model.
///
/// Hexagon_TODO: Do I need an aggregate alignment?
///
-HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT,
+HexagonTargetMachine::HexagonTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.h b/lib/Target/Hexagon/HexagonTargetMachine.h
index 5774f7e195b0..115eadb98c33 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.h
+++ b/lib/Target/Hexagon/HexagonTargetMachine.h
@@ -27,7 +27,7 @@ class HexagonTargetMachine : public LLVMTargetMachine {
HexagonSubtarget Subtarget;
public:
- HexagonTargetMachine(const Target &T, StringRef TT,StringRef CPU,
+ HexagonTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
diff --git a/lib/Target/Hexagon/HexagonTargetStreamer.h b/lib/Target/Hexagon/HexagonTargetStreamer.h
new file mode 100644
index 000000000000..2b4a3ada506b
--- /dev/null
+++ b/lib/Target/Hexagon/HexagonTargetStreamer.h
@@ -0,0 +1,31 @@
+//===-- HexagonTargetStreamer.h - Hexagon Target Streamer ------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGONTARGETSTREAMER_H
+#define HEXAGONTARGETSTREAMER_H
+
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class HexagonTargetStreamer : public MCTargetStreamer {
+public:
+ HexagonTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
+ virtual void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit = 0){};
+ virtual void emitFAlign(unsigned Size, unsigned MaxBytesToEmit){};
+ virtual void EmitCommonSymbolSorted(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment,
+ unsigned AccessGranularity){};
+ virtual void EmitLocalCommonSymbolSorted(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlign,
+ unsigned AccessGranularity){};
+};
+} // namespace llvm
+
+#endif
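[Editor's sketch] The new header follows the usual target-streamer shape: empty virtual hooks on a base class so generic emission code can call them unconditionally, with object-format-specific subclasses overriding only what they need. A standalone sketch of that dispatch pattern; the classes here are toys, not the real MC hierarchy.

    #include <iostream>

    // Base class: every hook has a harmless no-op default.
    class TargetStreamerSketch {
    public:
      virtual ~TargetStreamerSketch() = default;
      virtual void emitFAlign(unsigned Size, unsigned MaxBytesToEmit) {}
    };

    // Format-specific subclass overrides the hooks it implements.
    class ELFStreamerSketch : public TargetStreamerSketch {
    public:
      void emitFAlign(unsigned Size, unsigned MaxBytesToEmit) override {
        std::cout << ".falign " << Size << ", " << MaxBytesToEmit << "\n";
      }
    };

    int main() {
      ELFStreamerSketch S;
      TargetStreamerSketch &TS = S;
      TS.emitFAlign(16, 0); // dispatches to the ELF override
      return 0;
    }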
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index 0cc59bcc7671..66fdd65b3ea7 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -57,6 +57,7 @@ static cl::opt<bool> PacketizeVolatiles("hexagon-packetize-volatiles",
cl::desc("Allow non-solo packetization of volatile memory references"));
namespace llvm {
+ FunctionPass *createHexagonPacketizer();
void initializeHexagonPacketizerPass(PassRegistry&);
}
@@ -169,7 +170,7 @@ namespace {
void reserveResourcesForConstExt(MachineInstr* MI);
bool isNewValueInst(MachineInstr* MI);
};
-}
+} // namespace
INITIALIZE_PASS_BEGIN(HexagonPacketizer, "packets", "Hexagon Packetizer",
false, false)
diff --git a/lib/Target/Hexagon/LLVMBuild.txt b/lib/Target/Hexagon/LLVMBuild.txt
index 6ffd26a2022a..8259055b3f41 100644
--- a/lib/Target/Hexagon/LLVMBuild.txt
+++ b/lib/Target/Hexagon/LLVMBuild.txt
@@ -28,5 +28,15 @@ has_asmprinter = 1
type = Library
name = HexagonCodeGen
parent = Hexagon
-required_libraries = Analysis AsmPrinter CodeGen Core HexagonDesc HexagonInfo MC SelectionDAG Support Target
+required_libraries =
+ Analysis
+ AsmPrinter
+ CodeGen
+ Core
+ HexagonDesc
+ HexagonInfo
+ MC
+ SelectionDAG
+ Support
+ Target
add_to_library_groups = Hexagon
diff --git a/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt b/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt
index 6253686b4993..5403b106cbbe 100644
--- a/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt
+++ b/lib/Target/Hexagon/MCTargetDesc/CMakeLists.txt
@@ -6,6 +6,7 @@ add_llvm_library(LLVMHexagonDesc
HexagonMCCodeEmitter.cpp
HexagonMCCompound.cpp
HexagonMCDuplexInfo.cpp
+ HexagonMCELFStreamer.cpp
HexagonMCInstrInfo.cpp
HexagonMCShuffler.cpp
HexagonMCTargetDesc.cpp
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
index 76894840153d..99ea2fabf867 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
@@ -17,11 +17,14 @@
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
using namespace Hexagon;
+#define DEBUG_TYPE "hexagon-asm-backend"
+
namespace {
class HexagonAsmBackend : public MCAsmBackend {
@@ -278,8 +281,26 @@ public:
llvm_unreachable("relaxInstruction() unimplemented");
}
- bool writeNopData(uint64_t /*Count*/,
- MCObjectWriter * /*OW*/) const override {
+ bool writeNopData(uint64_t Count,
+ MCObjectWriter * OW) const override {
+ static const uint32_t Nopcode = 0x7f000000, // Hard-coded NOP.
+ ParseIn = 0x00004000, // In packet parse-bits.
+ ParseEnd = 0x0000c000; // End of packet parse-bits.
+
+ while(Count % HEXAGON_INSTR_SIZE) {
+ DEBUG(dbgs() << "Alignment not a multiple of the instruction size:" <<
+ Count % HEXAGON_INSTR_SIZE << "/" << HEXAGON_INSTR_SIZE << "\n");
+ --Count;
+ OW->write8(0);
+ }
+
+ while(Count) {
+ Count -= HEXAGON_INSTR_SIZE;
+ // Close the packet whenever a multiple of the maximum packet size remains
+ uint32_t ParseBits = (Count % (HEXAGON_PACKET_SIZE * HEXAGON_INSTR_SIZE))?
+ ParseIn: ParseEnd;
+ OW->write32(Nopcode | ParseBits);
+ }
return true;
}
};
@@ -288,8 +309,8 @@ public:
namespace llvm {
MCAsmBackend *createHexagonAsmBackend(Target const &T,
MCRegisterInfo const & /*MRI*/,
- StringRef TT, StringRef CPU) {
- uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
+ const Triple &TT, StringRef CPU) {
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
return new HexagonAsmBackend(T, OSABI, CPU);
}
}
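
Not part of the patch: a standalone model of the writeNopData logic above. The constants are copied from the hunk; HEXAGON_INSTR_SIZE and HEXAGON_PACKET_SIZE are assumed to be 4 bytes and 4 instructions here, and a byte vector stands in for the MCObjectWriter.

#include <cstdint>
#include <cstdio>
#include <vector>

static const uint32_t Nopcode  = 0x7f000000; // Hard-coded NOP.
static const uint32_t ParseIn  = 0x00004000; // In-packet parse-bits.
static const uint32_t ParseEnd = 0x0000c000; // End-of-packet parse-bits.
static const uint64_t InstrSize  = 4; // assumed HEXAGON_INSTR_SIZE
static const uint64_t PacketSize = 4; // assumed HEXAGON_PACKET_SIZE

std::vector<uint8_t> makeNopData(uint64_t Count) {
  std::vector<uint8_t> Out;
  // Pad with zero bytes until Count is instruction-aligned.
  while (Count % InstrSize) {
    --Count;
    Out.push_back(0);
  }
  // Emit NOP words; close the packet whenever a whole number of
  // maximum-size packets remains -- the same test as the patch.
  while (Count) {
    Count -= InstrSize;
    uint32_t ParseBits =
        (Count % (PacketSize * InstrSize)) ? ParseIn : ParseEnd;
    uint32_t Word = Nopcode | ParseBits;
    for (int B = 0; B < 4; ++B) // little-endian write32
      Out.push_back((Word >> (8 * B)) & 0xff);
  }
  return Out;
}

int main() {
  auto Bytes = makeNopData(18); // 2 alignment bytes + 4 NOP words
  std::printf("%zu bytes emitted\n", Bytes.size());
}
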
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonELFObjectWriter.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonELFObjectWriter.cpp
index 843072302b21..0f7cf0e7fcbd 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonELFObjectWriter.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonELFObjectWriter.cpp
@@ -31,318 +31,216 @@ public:
unsigned GetRelocType(MCValue const &Target, MCFixup const &Fixup,
bool IsPCRel) const override;
};
-}
+} // namespace
HexagonELFObjectWriter::HexagonELFObjectWriter(uint8_t OSABI, StringRef C)
: MCELFObjectTargetWriter(/*Is64bit*/ false, OSABI, ELF::EM_HEXAGON,
/*HasRelocationAddend*/ true),
CPU(C) {}
-unsigned HexagonELFObjectWriter::GetRelocType(MCValue const &/*Target*/,
+unsigned HexagonELFObjectWriter::GetRelocType(MCValue const & /*Target*/,
MCFixup const &Fixup,
bool IsPCRel) const {
- // determine the type of the relocation
- unsigned Type = (unsigned)ELF::R_HEX_NONE;
- unsigned Kind = (unsigned)Fixup.getKind();
-
- switch (Kind) {
- default:
- DEBUG(dbgs() << "unrecognized relocation " << Fixup.getKind() << "\n");
- llvm_unreachable("Unimplemented Fixup kind!");
- break;
- case FK_Data_4:
- Type = (IsPCRel) ? ELF::R_HEX_32_PCREL : ELF::R_HEX_32;
- break;
- case FK_PCRel_4:
- Type = ELF::R_HEX_32_PCREL;
- break;
- case FK_Data_2:
- Type = ELF::R_HEX_16;
- break;
- case FK_Data_1:
- Type = ELF::R_HEX_8;
- break;
- case fixup_Hexagon_B22_PCREL:
- Type = ELF::R_HEX_B22_PCREL;
- break;
- case fixup_Hexagon_B15_PCREL:
- Type = ELF::R_HEX_B15_PCREL;
- break;
- case fixup_Hexagon_B7_PCREL:
- Type = ELF::R_HEX_B7_PCREL;
- break;
- case fixup_Hexagon_LO16:
- Type = ELF::R_HEX_LO16;
- break;
- case fixup_Hexagon_HI16:
- Type = ELF::R_HEX_HI16;
- break;
- case fixup_Hexagon_32:
- Type = ELF::R_HEX_32;
- break;
- case fixup_Hexagon_16:
- Type = ELF::R_HEX_16;
- break;
- case fixup_Hexagon_8:
- Type = ELF::R_HEX_8;
- break;
- case fixup_Hexagon_GPREL16_0:
- Type = ELF::R_HEX_GPREL16_0;
- break;
- case fixup_Hexagon_GPREL16_1:
- Type = ELF::R_HEX_GPREL16_1;
- break;
- case fixup_Hexagon_GPREL16_2:
- Type = ELF::R_HEX_GPREL16_2;
- break;
- case fixup_Hexagon_GPREL16_3:
- Type = ELF::R_HEX_GPREL16_3;
- break;
- case fixup_Hexagon_HL16:
- Type = ELF::R_HEX_HL16;
- break;
- case fixup_Hexagon_B13_PCREL:
- Type = ELF::R_HEX_B13_PCREL;
- break;
- case fixup_Hexagon_B9_PCREL:
- Type = ELF::R_HEX_B9_PCREL;
- break;
- case fixup_Hexagon_B32_PCREL_X:
- Type = ELF::R_HEX_B32_PCREL_X;
- break;
- case fixup_Hexagon_32_6_X:
- Type = ELF::R_HEX_32_6_X;
- break;
- case fixup_Hexagon_B22_PCREL_X:
- Type = ELF::R_HEX_B22_PCREL_X;
- break;
- case fixup_Hexagon_B15_PCREL_X:
- Type = ELF::R_HEX_B15_PCREL_X;
- break;
- case fixup_Hexagon_B13_PCREL_X:
- Type = ELF::R_HEX_B13_PCREL_X;
- break;
- case fixup_Hexagon_B9_PCREL_X:
- Type = ELF::R_HEX_B9_PCREL_X;
- break;
- case fixup_Hexagon_B7_PCREL_X:
- Type = ELF::R_HEX_B7_PCREL_X;
- break;
- case fixup_Hexagon_16_X:
- Type = ELF::R_HEX_16_X;
- break;
- case fixup_Hexagon_12_X:
- Type = ELF::R_HEX_12_X;
- break;
- case fixup_Hexagon_11_X:
- Type = ELF::R_HEX_11_X;
- break;
- case fixup_Hexagon_10_X:
- Type = ELF::R_HEX_10_X;
- break;
- case fixup_Hexagon_9_X:
- Type = ELF::R_HEX_9_X;
- break;
- case fixup_Hexagon_8_X:
- Type = ELF::R_HEX_8_X;
- break;
- case fixup_Hexagon_7_X:
- Type = ELF::R_HEX_7_X;
- break;
- case fixup_Hexagon_6_X:
- Type = ELF::R_HEX_6_X;
- break;
- case fixup_Hexagon_32_PCREL:
- Type = ELF::R_HEX_32_PCREL;
- break;
- case fixup_Hexagon_COPY:
- Type = ELF::R_HEX_COPY;
- break;
- case fixup_Hexagon_GLOB_DAT:
- Type = ELF::R_HEX_GLOB_DAT;
- break;
- case fixup_Hexagon_JMP_SLOT:
- Type = ELF::R_HEX_JMP_SLOT;
- break;
- case fixup_Hexagon_RELATIVE:
- Type = ELF::R_HEX_RELATIVE;
- break;
- case fixup_Hexagon_PLT_B22_PCREL:
- Type = ELF::R_HEX_PLT_B22_PCREL;
- break;
- case fixup_Hexagon_GOTREL_LO16:
- Type = ELF::R_HEX_GOTREL_LO16;
- break;
- case fixup_Hexagon_GOTREL_HI16:
- Type = ELF::R_HEX_GOTREL_HI16;
- break;
- case fixup_Hexagon_GOTREL_32:
- Type = ELF::R_HEX_GOTREL_32;
- break;
- case fixup_Hexagon_GOT_LO16:
- Type = ELF::R_HEX_GOT_LO16;
- break;
- case fixup_Hexagon_GOT_HI16:
- Type = ELF::R_HEX_GOT_HI16;
- break;
- case fixup_Hexagon_GOT_32:
- Type = ELF::R_HEX_GOT_32;
- break;
- case fixup_Hexagon_GOT_16:
- Type = ELF::R_HEX_GOT_16;
- break;
- case fixup_Hexagon_DTPMOD_32:
- Type = ELF::R_HEX_DTPMOD_32;
- break;
- case fixup_Hexagon_DTPREL_LO16:
- Type = ELF::R_HEX_DTPREL_LO16;
- break;
- case fixup_Hexagon_DTPREL_HI16:
- Type = ELF::R_HEX_DTPREL_HI16;
- break;
- case fixup_Hexagon_DTPREL_32:
- Type = ELF::R_HEX_DTPREL_32;
- break;
- case fixup_Hexagon_DTPREL_16:
- Type = ELF::R_HEX_DTPREL_16;
- break;
- case fixup_Hexagon_GD_PLT_B22_PCREL:
- Type = ELF::R_HEX_GD_PLT_B22_PCREL;
- break;
- case fixup_Hexagon_LD_PLT_B22_PCREL:
- Type = ELF::R_HEX_LD_PLT_B22_PCREL;
- break;
- case fixup_Hexagon_GD_GOT_LO16:
- Type = ELF::R_HEX_GD_GOT_LO16;
- break;
- case fixup_Hexagon_GD_GOT_HI16:
- Type = ELF::R_HEX_GD_GOT_HI16;
- break;
- case fixup_Hexagon_GD_GOT_32:
- Type = ELF::R_HEX_GD_GOT_32;
- break;
- case fixup_Hexagon_GD_GOT_16:
- Type = ELF::R_HEX_GD_GOT_16;
- break;
- case fixup_Hexagon_LD_GOT_LO16:
- Type = ELF::R_HEX_LD_GOT_LO16;
- break;
- case fixup_Hexagon_LD_GOT_HI16:
- Type = ELF::R_HEX_LD_GOT_HI16;
- break;
- case fixup_Hexagon_LD_GOT_32:
- Type = ELF::R_HEX_LD_GOT_32;
- break;
- case fixup_Hexagon_LD_GOT_16:
- Type = ELF::R_HEX_LD_GOT_16;
- break;
- case fixup_Hexagon_IE_LO16:
- Type = ELF::R_HEX_IE_LO16;
- break;
- case fixup_Hexagon_IE_HI16:
- Type = ELF::R_HEX_IE_HI16;
- break;
- case fixup_Hexagon_IE_32:
- Type = ELF::R_HEX_IE_32;
- break;
- case fixup_Hexagon_IE_GOT_LO16:
- Type = ELF::R_HEX_IE_GOT_LO16;
- break;
- case fixup_Hexagon_IE_GOT_HI16:
- Type = ELF::R_HEX_IE_GOT_HI16;
- break;
- case fixup_Hexagon_IE_GOT_32:
- Type = ELF::R_HEX_IE_GOT_32;
- break;
- case fixup_Hexagon_IE_GOT_16:
- Type = ELF::R_HEX_IE_GOT_16;
- break;
- case fixup_Hexagon_TPREL_LO16:
- Type = ELF::R_HEX_TPREL_LO16;
- break;
- case fixup_Hexagon_TPREL_HI16:
- Type = ELF::R_HEX_TPREL_HI16;
- break;
- case fixup_Hexagon_TPREL_32:
- Type = ELF::R_HEX_TPREL_32;
- break;
- case fixup_Hexagon_TPREL_16:
- Type = ELF::R_HEX_TPREL_16;
- break;
- case fixup_Hexagon_6_PCREL_X:
- Type = ELF::R_HEX_6_PCREL_X;
- break;
- case fixup_Hexagon_GOTREL_32_6_X:
- Type = ELF::R_HEX_GOTREL_32_6_X;
- break;
- case fixup_Hexagon_GOTREL_16_X:
- Type = ELF::R_HEX_GOTREL_16_X;
- break;
- case fixup_Hexagon_GOTREL_11_X:
- Type = ELF::R_HEX_GOTREL_11_X;
- break;
- case fixup_Hexagon_GOT_32_6_X:
- Type = ELF::R_HEX_GOT_32_6_X;
- break;
- case fixup_Hexagon_GOT_16_X:
- Type = ELF::R_HEX_GOT_16_X;
- break;
- case fixup_Hexagon_GOT_11_X:
- Type = ELF::R_HEX_GOT_11_X;
- break;
- case fixup_Hexagon_DTPREL_32_6_X:
- Type = ELF::R_HEX_DTPREL_32_6_X;
- break;
- case fixup_Hexagon_DTPREL_16_X:
- Type = ELF::R_HEX_DTPREL_16_X;
- break;
- case fixup_Hexagon_DTPREL_11_X:
- Type = ELF::R_HEX_DTPREL_11_X;
- break;
- case fixup_Hexagon_GD_GOT_32_6_X:
- Type = ELF::R_HEX_GD_GOT_32_6_X;
- break;
- case fixup_Hexagon_GD_GOT_16_X:
- Type = ELF::R_HEX_GD_GOT_16_X;
- break;
- case fixup_Hexagon_GD_GOT_11_X:
- Type = ELF::R_HEX_GD_GOT_11_X;
- break;
- case fixup_Hexagon_LD_GOT_32_6_X:
- Type = ELF::R_HEX_LD_GOT_32_6_X;
- break;
- case fixup_Hexagon_LD_GOT_16_X:
- Type = ELF::R_HEX_LD_GOT_16_X;
- break;
- case fixup_Hexagon_LD_GOT_11_X:
- Type = ELF::R_HEX_LD_GOT_11_X;
- break;
- case fixup_Hexagon_IE_32_6_X:
- Type = ELF::R_HEX_IE_32_6_X;
- break;
- case fixup_Hexagon_IE_16_X:
- Type = ELF::R_HEX_IE_16_X;
- break;
- case fixup_Hexagon_IE_GOT_32_6_X:
- Type = ELF::R_HEX_IE_GOT_32_6_X;
- break;
- case fixup_Hexagon_IE_GOT_16_X:
- Type = ELF::R_HEX_IE_GOT_16_X;
- break;
- case fixup_Hexagon_IE_GOT_11_X:
- Type = ELF::R_HEX_IE_GOT_11_X;
- break;
- case fixup_Hexagon_TPREL_32_6_X:
- Type = ELF::R_HEX_TPREL_32_6_X;
- break;
- case fixup_Hexagon_TPREL_16_X:
- Type = ELF::R_HEX_TPREL_16_X;
- break;
- case fixup_Hexagon_TPREL_11_X:
- Type = ELF::R_HEX_TPREL_11_X;
- break;
+ switch ((unsigned)Fixup.getKind()) {
+ default:
+ DEBUG(dbgs() << "unrecognized relocation " << Fixup.getKind() << "\n");
+ llvm_unreachable("Unimplemented Fixup kind!");
+ return ELF::R_HEX_NONE;
+ case FK_Data_4:
+ return (IsPCRel) ? ELF::R_HEX_32_PCREL : ELF::R_HEX_32;
+ case FK_PCRel_4:
+ return ELF::R_HEX_32_PCREL;
+ case FK_Data_2:
+ return ELF::R_HEX_16;
+ case FK_Data_1:
+ return ELF::R_HEX_8;
+ case fixup_Hexagon_B22_PCREL:
+ return ELF::R_HEX_B22_PCREL;
+ case fixup_Hexagon_B15_PCREL:
+ return ELF::R_HEX_B15_PCREL;
+ case fixup_Hexagon_B7_PCREL:
+ return ELF::R_HEX_B7_PCREL;
+ case fixup_Hexagon_LO16:
+ return ELF::R_HEX_LO16;
+ case fixup_Hexagon_HI16:
+ return ELF::R_HEX_HI16;
+ case fixup_Hexagon_32:
+ return ELF::R_HEX_32;
+ case fixup_Hexagon_16:
+ return ELF::R_HEX_16;
+ case fixup_Hexagon_8:
+ return ELF::R_HEX_8;
+ case fixup_Hexagon_GPREL16_0:
+ return ELF::R_HEX_GPREL16_0;
+ case fixup_Hexagon_GPREL16_1:
+ return ELF::R_HEX_GPREL16_1;
+ case fixup_Hexagon_GPREL16_2:
+ return ELF::R_HEX_GPREL16_2;
+ case fixup_Hexagon_GPREL16_3:
+ return ELF::R_HEX_GPREL16_3;
+ case fixup_Hexagon_HL16:
+ return ELF::R_HEX_HL16;
+ case fixup_Hexagon_B13_PCREL:
+ return ELF::R_HEX_B13_PCREL;
+ case fixup_Hexagon_B9_PCREL:
+ return ELF::R_HEX_B9_PCREL;
+ case fixup_Hexagon_B32_PCREL_X:
+ return ELF::R_HEX_B32_PCREL_X;
+ case fixup_Hexagon_32_6_X:
+ return ELF::R_HEX_32_6_X;
+ case fixup_Hexagon_B22_PCREL_X:
+ return ELF::R_HEX_B22_PCREL_X;
+ case fixup_Hexagon_B15_PCREL_X:
+ return ELF::R_HEX_B15_PCREL_X;
+ case fixup_Hexagon_B13_PCREL_X:
+ return ELF::R_HEX_B13_PCREL_X;
+ case fixup_Hexagon_B9_PCREL_X:
+ return ELF::R_HEX_B9_PCREL_X;
+ case fixup_Hexagon_B7_PCREL_X:
+ return ELF::R_HEX_B7_PCREL_X;
+ case fixup_Hexagon_16_X:
+ return ELF::R_HEX_16_X;
+ case fixup_Hexagon_12_X:
+ return ELF::R_HEX_12_X;
+ case fixup_Hexagon_11_X:
+ return ELF::R_HEX_11_X;
+ case fixup_Hexagon_10_X:
+ return ELF::R_HEX_10_X;
+ case fixup_Hexagon_9_X:
+ return ELF::R_HEX_9_X;
+ case fixup_Hexagon_8_X:
+ return ELF::R_HEX_8_X;
+ case fixup_Hexagon_7_X:
+ return ELF::R_HEX_7_X;
+ case fixup_Hexagon_6_X:
+ return ELF::R_HEX_6_X;
+ case fixup_Hexagon_32_PCREL:
+ return ELF::R_HEX_32_PCREL;
+ case fixup_Hexagon_COPY:
+ return ELF::R_HEX_COPY;
+ case fixup_Hexagon_GLOB_DAT:
+ return ELF::R_HEX_GLOB_DAT;
+ case fixup_Hexagon_JMP_SLOT:
+ return ELF::R_HEX_JMP_SLOT;
+ case fixup_Hexagon_RELATIVE:
+ return ELF::R_HEX_RELATIVE;
+ case fixup_Hexagon_PLT_B22_PCREL:
+ return ELF::R_HEX_PLT_B22_PCREL;
+ case fixup_Hexagon_GOTREL_LO16:
+ return ELF::R_HEX_GOTREL_LO16;
+ case fixup_Hexagon_GOTREL_HI16:
+ return ELF::R_HEX_GOTREL_HI16;
+ case fixup_Hexagon_GOTREL_32:
+ return ELF::R_HEX_GOTREL_32;
+ case fixup_Hexagon_GOT_LO16:
+ return ELF::R_HEX_GOT_LO16;
+ case fixup_Hexagon_GOT_HI16:
+ return ELF::R_HEX_GOT_HI16;
+ case fixup_Hexagon_GOT_32:
+ return ELF::R_HEX_GOT_32;
+ case fixup_Hexagon_GOT_16:
+ return ELF::R_HEX_GOT_16;
+ case fixup_Hexagon_DTPMOD_32:
+ return ELF::R_HEX_DTPMOD_32;
+ case fixup_Hexagon_DTPREL_LO16:
+ return ELF::R_HEX_DTPREL_LO16;
+ case fixup_Hexagon_DTPREL_HI16:
+ return ELF::R_HEX_DTPREL_HI16;
+ case fixup_Hexagon_DTPREL_32:
+ return ELF::R_HEX_DTPREL_32;
+ case fixup_Hexagon_DTPREL_16:
+ return ELF::R_HEX_DTPREL_16;
+ case fixup_Hexagon_GD_PLT_B22_PCREL:
+ return ELF::R_HEX_GD_PLT_B22_PCREL;
+ case fixup_Hexagon_LD_PLT_B22_PCREL:
+ return ELF::R_HEX_LD_PLT_B22_PCREL;
+ case fixup_Hexagon_GD_GOT_LO16:
+ return ELF::R_HEX_GD_GOT_LO16;
+ case fixup_Hexagon_GD_GOT_HI16:
+ return ELF::R_HEX_GD_GOT_HI16;
+ case fixup_Hexagon_GD_GOT_32:
+ return ELF::R_HEX_GD_GOT_32;
+ case fixup_Hexagon_GD_GOT_16:
+ return ELF::R_HEX_GD_GOT_16;
+ case fixup_Hexagon_LD_GOT_LO16:
+ return ELF::R_HEX_LD_GOT_LO16;
+ case fixup_Hexagon_LD_GOT_HI16:
+ return ELF::R_HEX_LD_GOT_HI16;
+ case fixup_Hexagon_LD_GOT_32:
+ return ELF::R_HEX_LD_GOT_32;
+ case fixup_Hexagon_LD_GOT_16:
+ return ELF::R_HEX_LD_GOT_16;
+ case fixup_Hexagon_IE_LO16:
+ return ELF::R_HEX_IE_LO16;
+ case fixup_Hexagon_IE_HI16:
+ return ELF::R_HEX_IE_HI16;
+ case fixup_Hexagon_IE_32:
+ return ELF::R_HEX_IE_32;
+ case fixup_Hexagon_IE_GOT_LO16:
+ return ELF::R_HEX_IE_GOT_LO16;
+ case fixup_Hexagon_IE_GOT_HI16:
+ return ELF::R_HEX_IE_GOT_HI16;
+ case fixup_Hexagon_IE_GOT_32:
+ return ELF::R_HEX_IE_GOT_32;
+ case fixup_Hexagon_IE_GOT_16:
+ return ELF::R_HEX_IE_GOT_16;
+ case fixup_Hexagon_TPREL_LO16:
+ return ELF::R_HEX_TPREL_LO16;
+ case fixup_Hexagon_TPREL_HI16:
+ return ELF::R_HEX_TPREL_HI16;
+ case fixup_Hexagon_TPREL_32:
+ return ELF::R_HEX_TPREL_32;
+ case fixup_Hexagon_TPREL_16:
+ return ELF::R_HEX_TPREL_16;
+ case fixup_Hexagon_6_PCREL_X:
+ return ELF::R_HEX_6_PCREL_X;
+ case fixup_Hexagon_GOTREL_32_6_X:
+ return ELF::R_HEX_GOTREL_32_6_X;
+ case fixup_Hexagon_GOTREL_16_X:
+ return ELF::R_HEX_GOTREL_16_X;
+ case fixup_Hexagon_GOTREL_11_X:
+ return ELF::R_HEX_GOTREL_11_X;
+ case fixup_Hexagon_GOT_32_6_X:
+ return ELF::R_HEX_GOT_32_6_X;
+ case fixup_Hexagon_GOT_16_X:
+ return ELF::R_HEX_GOT_16_X;
+ case fixup_Hexagon_GOT_11_X:
+ return ELF::R_HEX_GOT_11_X;
+ case fixup_Hexagon_DTPREL_32_6_X:
+ return ELF::R_HEX_DTPREL_32_6_X;
+ case fixup_Hexagon_DTPREL_16_X:
+ return ELF::R_HEX_DTPREL_16_X;
+ case fixup_Hexagon_DTPREL_11_X:
+ return ELF::R_HEX_DTPREL_11_X;
+ case fixup_Hexagon_GD_GOT_32_6_X:
+ return ELF::R_HEX_GD_GOT_32_6_X;
+ case fixup_Hexagon_GD_GOT_16_X:
+ return ELF::R_HEX_GD_GOT_16_X;
+ case fixup_Hexagon_GD_GOT_11_X:
+ return ELF::R_HEX_GD_GOT_11_X;
+ case fixup_Hexagon_LD_GOT_32_6_X:
+ return ELF::R_HEX_LD_GOT_32_6_X;
+ case fixup_Hexagon_LD_GOT_16_X:
+ return ELF::R_HEX_LD_GOT_16_X;
+ case fixup_Hexagon_LD_GOT_11_X:
+ return ELF::R_HEX_LD_GOT_11_X;
+ case fixup_Hexagon_IE_32_6_X:
+ return ELF::R_HEX_IE_32_6_X;
+ case fixup_Hexagon_IE_16_X:
+ return ELF::R_HEX_IE_16_X;
+ case fixup_Hexagon_IE_GOT_32_6_X:
+ return ELF::R_HEX_IE_GOT_32_6_X;
+ case fixup_Hexagon_IE_GOT_16_X:
+ return ELF::R_HEX_IE_GOT_16_X;
+ case fixup_Hexagon_IE_GOT_11_X:
+ return ELF::R_HEX_IE_GOT_11_X;
+ case fixup_Hexagon_TPREL_32_6_X:
+ return ELF::R_HEX_TPREL_32_6_X;
+ case fixup_Hexagon_TPREL_16_X:
+ return ELF::R_HEX_TPREL_16_X;
+ case fixup_Hexagon_TPREL_11_X:
+ return ELF::R_HEX_TPREL_11_X;
}
- return Type;
}
MCObjectWriter *llvm::createHexagonELFObjectWriter(raw_pwrite_stream &OS,
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
index 1eee852996fd..6f8cb90f18f9 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp
@@ -342,6 +342,36 @@ static Hexagon::Fixups getFixupNoBits(MCInstrInfo const &MCII, const MCInst &MI,
return LastTargetFixupKind;
}
+namespace llvm {
+extern const MCInstrDesc HexagonInsts[];
+}
+
+namespace {
+ bool isPCRel (unsigned Kind) {
+ switch(Kind){
+ case fixup_Hexagon_B22_PCREL:
+ case fixup_Hexagon_B15_PCREL:
+ case fixup_Hexagon_B7_PCREL:
+ case fixup_Hexagon_B13_PCREL:
+ case fixup_Hexagon_B9_PCREL:
+ case fixup_Hexagon_B32_PCREL_X:
+ case fixup_Hexagon_B22_PCREL_X:
+ case fixup_Hexagon_B15_PCREL_X:
+ case fixup_Hexagon_B13_PCREL_X:
+ case fixup_Hexagon_B9_PCREL_X:
+ case fixup_Hexagon_B7_PCREL_X:
+ case fixup_Hexagon_32_PCREL:
+ case fixup_Hexagon_PLT_B22_PCREL:
+ case fixup_Hexagon_GD_PLT_B22_PCREL:
+ case fixup_Hexagon_LD_PLT_B22_PCREL:
+ case fixup_Hexagon_6_PCREL_X:
+ return true;
+ default:
+ return false;
+ }
+ }
+} // namespace
+
unsigned HexagonMCCodeEmitter::getExprOpValue(const MCInst &MI,
const MCOperand &MO,
const MCExpr *ME,
@@ -363,7 +393,7 @@ unsigned HexagonMCCodeEmitter::getExprOpValue(const MCInst &MI,
Res = getExprOpValue(MI, MO, cast<MCBinaryExpr>(ME)->getLHS(), Fixups, STI);
Res +=
getExprOpValue(MI, MO, cast<MCBinaryExpr>(ME)->getRHS(), Fixups, STI);
- return Res;
+ return 0;
}
assert(MK == MCExpr::SymbolRef);
@@ -662,8 +692,13 @@ unsigned HexagonMCCodeEmitter::getExprOpValue(const MCInst &MI,
break;
}
- MCFixup fixup =
- MCFixup::create(*Addend, MO.getExpr(), MCFixupKind(FixupKind));
+ MCExpr const *FixupExpression = (*Addend > 0 && isPCRel(FixupKind)) ?
+ MCBinaryExpr::createAdd(MO.getExpr(),
+ MCConstantExpr::create(*Addend, MCT), MCT) :
+ MO.getExpr();
+
+ MCFixup fixup = MCFixup::create(*Addend, FixupExpression,
+ MCFixupKind(FixupKind), MI.getLoc());
Fixups.push_back(fixup);
// All of the information is in the fixup.
return (0);
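
Not part of the patch: a standalone sketch of the addend-folding decision above. For a PC-relative fixup with a positive addend, the new code wraps the target expression in (expr + addend) before creating the fixup; expressions are modelled as plain strings here rather than MCExprs.

#include <cstdint>
#include <iostream>
#include <string>

// Returns the expression the fixup should reference: the original
// one, or (expr + addend) when the kind is PC-relative and the
// addend is positive -- the same test the patch applies.
std::string fixupExpression(const std::string &Expr, int64_t Addend,
                            bool IsPCRel) {
  if (Addend > 0 && IsPCRel)
    return "(" + Expr + " + " + std::to_string(Addend) + ")";
  return Expr;
}

int main() {
  std::cout << fixupExpression("target", 4, true) << "\n";  // (target + 4)
  std::cout << fixupExpression("target", 4, false) << "\n"; // target
}
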
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h
index 9aa258cee4c6..2a154da26c5d 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.h
@@ -44,8 +44,6 @@ public:
uint32_t parseBits(size_t Instruction, size_t Last, MCInst const &MCB,
MCInst const &MCI) const;
- MCSubtargetInfo const &getSubtargetInfo() const;
-
void encodeInstruction(MCInst const &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
MCSubtargetInfo const &STI) const override;
@@ -65,10 +63,6 @@ public:
unsigned getMachineOpValue(MCInst const &MI, MCOperand const &MO,
SmallVectorImpl<MCFixup> &Fixups,
MCSubtargetInfo const &STI) const;
-
-private:
- HexagonMCCodeEmitter(HexagonMCCodeEmitter const &) = delete;
- void operator=(HexagonMCCodeEmitter const &) = delete;
}; // class HexagonMCCodeEmitter
} // namespace llvm
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp
index 108093547f82..0d1f1e607e63 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp
@@ -40,39 +40,39 @@ enum OpcodeIndex {
tp1_jump_t
};
-unsigned tstBitOpcode[8] = {J4_tstbit0_fp0_jump_nt, J4_tstbit0_fp0_jump_t,
- J4_tstbit0_fp1_jump_nt, J4_tstbit0_fp1_jump_t,
- J4_tstbit0_tp0_jump_nt, J4_tstbit0_tp0_jump_t,
- J4_tstbit0_tp1_jump_nt, J4_tstbit0_tp1_jump_t};
-unsigned cmpeqBitOpcode[8] = {J4_cmpeq_fp0_jump_nt, J4_cmpeq_fp0_jump_t,
- J4_cmpeq_fp1_jump_nt, J4_cmpeq_fp1_jump_t,
- J4_cmpeq_tp0_jump_nt, J4_cmpeq_tp0_jump_t,
- J4_cmpeq_tp1_jump_nt, J4_cmpeq_tp1_jump_t};
-unsigned cmpgtBitOpcode[8] = {J4_cmpgt_fp0_jump_nt, J4_cmpgt_fp0_jump_t,
- J4_cmpgt_fp1_jump_nt, J4_cmpgt_fp1_jump_t,
- J4_cmpgt_tp0_jump_nt, J4_cmpgt_tp0_jump_t,
- J4_cmpgt_tp1_jump_nt, J4_cmpgt_tp1_jump_t};
-unsigned cmpgtuBitOpcode[8] = {J4_cmpgtu_fp0_jump_nt, J4_cmpgtu_fp0_jump_t,
- J4_cmpgtu_fp1_jump_nt, J4_cmpgtu_fp1_jump_t,
- J4_cmpgtu_tp0_jump_nt, J4_cmpgtu_tp0_jump_t,
- J4_cmpgtu_tp1_jump_nt, J4_cmpgtu_tp1_jump_t};
-unsigned cmpeqiBitOpcode[8] = {J4_cmpeqi_fp0_jump_nt, J4_cmpeqi_fp0_jump_t,
- J4_cmpeqi_fp1_jump_nt, J4_cmpeqi_fp1_jump_t,
- J4_cmpeqi_tp0_jump_nt, J4_cmpeqi_tp0_jump_t,
- J4_cmpeqi_tp1_jump_nt, J4_cmpeqi_tp1_jump_t};
-unsigned cmpgtiBitOpcode[8] = {J4_cmpgti_fp0_jump_nt, J4_cmpgti_fp0_jump_t,
- J4_cmpgti_fp1_jump_nt, J4_cmpgti_fp1_jump_t,
- J4_cmpgti_tp0_jump_nt, J4_cmpgti_tp0_jump_t,
- J4_cmpgti_tp1_jump_nt, J4_cmpgti_tp1_jump_t};
-unsigned cmpgtuiBitOpcode[8] = {J4_cmpgtui_fp0_jump_nt, J4_cmpgtui_fp0_jump_t,
- J4_cmpgtui_fp1_jump_nt, J4_cmpgtui_fp1_jump_t,
- J4_cmpgtui_tp0_jump_nt, J4_cmpgtui_tp0_jump_t,
- J4_cmpgtui_tp1_jump_nt, J4_cmpgtui_tp1_jump_t};
-unsigned cmpeqn1BitOpcode[8] = {J4_cmpeqn1_fp0_jump_nt, J4_cmpeqn1_fp0_jump_t,
- J4_cmpeqn1_fp1_jump_nt, J4_cmpeqn1_fp1_jump_t,
- J4_cmpeqn1_tp0_jump_nt, J4_cmpeqn1_tp0_jump_t,
- J4_cmpeqn1_tp1_jump_nt, J4_cmpeqn1_tp1_jump_t};
-unsigned cmpgtn1BitOpcode[8] = {
+static const unsigned tstBitOpcode[8] = {
+ J4_tstbit0_fp0_jump_nt, J4_tstbit0_fp0_jump_t, J4_tstbit0_fp1_jump_nt,
+ J4_tstbit0_fp1_jump_t, J4_tstbit0_tp0_jump_nt, J4_tstbit0_tp0_jump_t,
+ J4_tstbit0_tp1_jump_nt, J4_tstbit0_tp1_jump_t};
+static const unsigned cmpeqBitOpcode[8] = {
+ J4_cmpeq_fp0_jump_nt, J4_cmpeq_fp0_jump_t, J4_cmpeq_fp1_jump_nt,
+ J4_cmpeq_fp1_jump_t, J4_cmpeq_tp0_jump_nt, J4_cmpeq_tp0_jump_t,
+ J4_cmpeq_tp1_jump_nt, J4_cmpeq_tp1_jump_t};
+static const unsigned cmpgtBitOpcode[8] = {
+ J4_cmpgt_fp0_jump_nt, J4_cmpgt_fp0_jump_t, J4_cmpgt_fp1_jump_nt,
+ J4_cmpgt_fp1_jump_t, J4_cmpgt_tp0_jump_nt, J4_cmpgt_tp0_jump_t,
+ J4_cmpgt_tp1_jump_nt, J4_cmpgt_tp1_jump_t};
+static const unsigned cmpgtuBitOpcode[8] = {
+ J4_cmpgtu_fp0_jump_nt, J4_cmpgtu_fp0_jump_t, J4_cmpgtu_fp1_jump_nt,
+ J4_cmpgtu_fp1_jump_t, J4_cmpgtu_tp0_jump_nt, J4_cmpgtu_tp0_jump_t,
+ J4_cmpgtu_tp1_jump_nt, J4_cmpgtu_tp1_jump_t};
+static const unsigned cmpeqiBitOpcode[8] = {
+ J4_cmpeqi_fp0_jump_nt, J4_cmpeqi_fp0_jump_t, J4_cmpeqi_fp1_jump_nt,
+ J4_cmpeqi_fp1_jump_t, J4_cmpeqi_tp0_jump_nt, J4_cmpeqi_tp0_jump_t,
+ J4_cmpeqi_tp1_jump_nt, J4_cmpeqi_tp1_jump_t};
+static const unsigned cmpgtiBitOpcode[8] = {
+ J4_cmpgti_fp0_jump_nt, J4_cmpgti_fp0_jump_t, J4_cmpgti_fp1_jump_nt,
+ J4_cmpgti_fp1_jump_t, J4_cmpgti_tp0_jump_nt, J4_cmpgti_tp0_jump_t,
+ J4_cmpgti_tp1_jump_nt, J4_cmpgti_tp1_jump_t};
+static const unsigned cmpgtuiBitOpcode[8] = {
+ J4_cmpgtui_fp0_jump_nt, J4_cmpgtui_fp0_jump_t, J4_cmpgtui_fp1_jump_nt,
+ J4_cmpgtui_fp1_jump_t, J4_cmpgtui_tp0_jump_nt, J4_cmpgtui_tp0_jump_t,
+ J4_cmpgtui_tp1_jump_nt, J4_cmpgtui_tp1_jump_t};
+static const unsigned cmpeqn1BitOpcode[8] = {
+ J4_cmpeqn1_fp0_jump_nt, J4_cmpeqn1_fp0_jump_t, J4_cmpeqn1_fp1_jump_nt,
+ J4_cmpeqn1_fp1_jump_t, J4_cmpeqn1_tp0_jump_nt, J4_cmpeqn1_tp0_jump_t,
+ J4_cmpeqn1_tp1_jump_nt, J4_cmpeqn1_tp1_jump_t};
+static const unsigned cmpgtn1BitOpcode[8] = {
J4_cmpgtn1_fp0_jump_nt, J4_cmpgtn1_fp0_jump_t, J4_cmpgtn1_fp1_jump_nt,
J4_cmpgtn1_fp1_jump_t, J4_cmpgtn1_tp0_jump_nt, J4_cmpgtn1_tp0_jump_t,
J4_cmpgtn1_tp1_jump_nt, J4_cmpgtn1_tp1_jump_t,
@@ -174,7 +174,7 @@ unsigned getCompoundCandidateGroup(MCInst const &MI, bool IsExtended) {
return HexagonII::HCG_None;
}
-}
+} // namespace
/// getCompoundOp - Return the index from 0-7 into the above opcode lists.
namespace {
@@ -199,7 +199,7 @@ unsigned getCompoundOp(MCInst const &HMCI) {
return (PredReg == Hexagon::P0) ? tp0_jump_t : tp1_jump_t;
}
}
-}
+} // namespace
namespace {
MCInst *getCompoundInsn(MCContext &Context, MCInst const &L, MCInst const &R) {
@@ -331,7 +331,7 @@ MCInst *getCompoundInsn(MCContext &Context, MCInst const &L, MCInst const &R) {
return CompoundInsn;
}
-}
+} // namespace
/// Non-Symmetrical. See if these two instructions are fit for compound pair.
namespace {
@@ -348,7 +348,7 @@ bool isOrderedCompoundPair(MCInst const &MIa, bool IsExtendedA,
return ((MIaG == HexagonII::HCG_A && MIbG == HexagonII::HCG_B) &&
(MIa.getOperand(0).getReg() == MIb.getOperand(0).getReg()));
}
-}
+} // namespace
namespace {
bool lookForCompound(MCInstrInfo const &MCII, MCContext &Context, MCInst &MCI) {
@@ -396,7 +396,7 @@ bool lookForCompound(MCInstrInfo const &MCII, MCContext &Context, MCInst &MCI) {
}
return false;
}
-}
+} // namespace
/// tryCompound - Given a bundle, check for compound insns; when one
/// is found, update the contents of the bundle with the compound insn.
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
index eb629774a2cd..7e9247cef6ad 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
@@ -394,8 +394,7 @@ unsigned HexagonMCInstrInfo::getDuplexCandidateGroup(MCInst const &MCI) {
Src1Reg = MCI.getOperand(0).getReg();
if (HexagonMCInstrInfo::isIntRegForSubInst(Src1Reg) &&
MCI.getOperand(1).isImm() && isUInt<4>(MCI.getOperand(1).getImm()) &&
- MCI.getOperand(2).isImm() && MCI.getOperand(2).isImm() &&
- isUInt<1>(MCI.getOperand(2).getImm())) {
+ MCI.getOperand(2).isImm() && isUInt<1>(MCI.getOperand(2).getImm())) {
return HexagonII::HSIG_S2;
}
break;
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp
new file mode 100644
index 000000000000..bf51c3515e95
--- /dev/null
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp
@@ -0,0 +1,152 @@
+//=== HexagonMCELFStreamer.cpp - Hexagon subclass of MCELFStreamer -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a stub that parses an MCInst bundle and passes the
+// instructions on to the real streamer.
+//
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "hexagonmcelfstreamer"
+
+#include "Hexagon.h"
+#include "HexagonMCELFStreamer.h"
+#include "MCTargetDesc/HexagonBaseInfo.h"
+#include "MCTargetDesc/HexagonMCShuffler.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+static cl::opt<unsigned>
+ GPSize("gpsize", cl::NotHidden,
+ cl::desc("Global Pointer Addressing Size. The default size is 8."),
+ cl::Prefix, cl::init(8));
+
+void HexagonMCELFStreamer::EmitInstruction(const MCInst &MCK,
+ const MCSubtargetInfo &STI) {
+ MCInst HMI;
+ HMI.setOpcode(Hexagon::BUNDLE);
+ HMI.addOperand(MCOperand::createImm(0));
+ MCInst *MCB;
+
+ if (MCK.getOpcode() != Hexagon::BUNDLE) {
+ HMI.addOperand(MCOperand::createInst(&MCK));
+ MCB = &HMI;
+ } else
+ MCB = const_cast<MCInst *>(&MCK);
+
+ // Examine the packet and pad it, if needed, when an
+ // end-loop is in the bundle.
+ HexagonMCInstrInfo::padEndloop(*MCB);
+ HexagonMCShuffle(*MCII, STI, *MCB);
+
+ assert(HexagonMCInstrInfo::bundleSize(*MCB) <= HEXAGON_PACKET_SIZE);
+ bool Extended = false;
+ for (auto &I : HexagonMCInstrInfo::bundleInstructions(*MCB)) {
+ MCInst *MCI = const_cast<MCInst *>(I.getInst());
+ if (Extended) {
+ if (HexagonMCInstrInfo::isDuplex(*MCII, *MCI)) {
+ MCInst *SubInst = const_cast<MCInst *>(MCI->getOperand(1).getInst());
+ HexagonMCInstrInfo::clampExtended(*MCII, *SubInst);
+ } else {
+ HexagonMCInstrInfo::clampExtended(*MCII, *MCI);
+ }
+ Extended = false;
+ } else {
+ Extended = HexagonMCInstrInfo::isImmext(*MCI);
+ }
+ }
+
+ // At this point, MCB is a bundle.
+ // Iterate through the bundle and register the symbols its expressions use.
+ for (auto const &I : HexagonMCInstrInfo::bundleInstructions(*MCB)) {
+ MCInst *MCI = const_cast<MCInst *>(I.getInst());
+ EmitSymbol(*MCI);
+ }
+ MCObjectStreamer::EmitInstruction(*MCB, STI);
+}
+
+void HexagonMCELFStreamer::EmitSymbol(const MCInst &Inst) {
+ // Scan for values.
+ for (unsigned i = Inst.getNumOperands(); i--;)
+ if (Inst.getOperand(i).isExpr())
+ visitUsedExpr(*Inst.getOperand(i).getExpr());
+}
+
+// EmitCommonSymbol and EmitLocalCommonSymbol are extended versions of the
+// functions found in MCELFStreamer.cpp taking AccessSize as an additional
+// parameter.
+void HexagonMCELFStreamer::HexagonMCEmitCommonSymbol(MCSymbol *Symbol,
+ uint64_t Size,
+ unsigned ByteAlignment,
+ unsigned AccessSize) {
+ getAssembler().registerSymbol(*Symbol);
+ StringRef sbss[4] = {".sbss.1", ".sbss.2", ".sbss.4", ".sbss.8"};
+
+ auto ELFSymbol = cast<MCSymbolELF>(Symbol);
+ if (!ELFSymbol->isBindingSet()) {
+ ELFSymbol->setBinding(ELF::STB_GLOBAL);
+ ELFSymbol->setExternal(true);
+ }
+
+ ELFSymbol->setType(ELF::STT_OBJECT);
+
+ if (ELFSymbol->getBinding() == ELF::STB_LOCAL) {
+ StringRef SectionName =
+ ((AccessSize == 0) || (Size == 0) || (Size > GPSize))
+ ? ".bss"
+ : sbss[(Log2_64(AccessSize))];
+
+ MCSection *CrntSection = getCurrentSection().first;
+ MCSection *Section = getAssembler().getContext().getELFSection(
+ SectionName, ELF::SHT_NOBITS, ELF::SHF_WRITE | ELF::SHF_ALLOC);
+ SwitchSection(Section);
+ AssignSection(Symbol, Section);
+
+ MCELFStreamer::EmitCommonSymbol(Symbol, Size, ByteAlignment);
+ SwitchSection(CrntSection);
+ } else {
+ if (ELFSymbol->declareCommon(Size, ByteAlignment))
+ report_fatal_error("Symbol: " + Symbol->getName() +
+ " redeclared as different type");
+ if ((AccessSize) && (Size <= GPSize)) {
+ uint64_t SectionIndex =
+ (AccessSize <= GPSize)
+ ? ELF::SHN_HEXAGON_SCOMMON + (Log2_64(AccessSize) + 1)
+ : (unsigned)ELF::SHN_HEXAGON_SCOMMON;
+ ELFSymbol->setIndex(SectionIndex);
+ }
+ }
+
+ ELFSymbol->setSize(MCConstantExpr::create(Size, getContext()));
+}
+
+void HexagonMCELFStreamer::HexagonMCEmitLocalCommonSymbol(
+ MCSymbol *Symbol, uint64_t Size, unsigned ByteAlignment,
+ unsigned AccessSize) {
+ getAssembler().registerSymbol(*Symbol);
+ auto ELFSymbol = cast<MCSymbolELF>(Symbol);
+ ELFSymbol->setBinding(ELF::STB_LOCAL);
+ ELFSymbol->setExternal(false);
+ HexagonMCEmitCommonSymbol(Symbol, Size, ByteAlignment, AccessSize);
+}
+
+namespace llvm {
+MCStreamer *createHexagonELFStreamer(MCContext &Context, MCAsmBackend &MAB,
+ raw_pwrite_stream &OS, MCCodeEmitter *CE) {
+ return new HexagonMCELFStreamer(Context, MAB, OS, CE);
+}
+}
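
Not part of the patch: a standalone model of the section choice in HexagonMCEmitCommonSymbol above. Local commons land in .bss unless the size fits under the GP threshold and an access size is known, in which case Log2 of the access size indexes the .sbss.{1,2,4,8} table; the GP threshold defaults to 8, matching the -gpsize option in the same file.

#include <cstdint>
#include <iostream>
#include <string>

std::string commonSection(uint64_t Size, unsigned AccessSize,
                          uint64_t GPSize = 8) {
  static const char *SBSS[4] = {".sbss.1", ".sbss.2", ".sbss.4", ".sbss.8"};
  // Same guard as the patch: no known access size, zero size, or too
  // big for GP-relative addressing -> plain .bss.
  if (AccessSize == 0 || Size == 0 || Size > GPSize)
    return ".bss";
  unsigned Idx = 0; // floor(log2(AccessSize)), AccessSize in {1,2,4,8}
  while ((1u << (Idx + 1)) <= AccessSize)
    ++Idx;
  return SBSS[Idx];
}

int main() {
  std::cout << commonSection(4, 4) << "\n";  // .sbss.4
  std::cout << commonSection(16, 4) << "\n"; // .bss
}
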
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.h
new file mode 100644
index 000000000000..d77c0cd16b37
--- /dev/null
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.h
@@ -0,0 +1,45 @@
+//===- HexagonMCELFStreamer.h - Hexagon subclass of MCELFStreamer ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGONMCELFSTREAMER_H
+#define HEXAGONMCELFSTREAMER_H
+
+#include "MCTargetDesc/HexagonMCCodeEmitter.h"
+#include "MCTargetDesc/HexagonMCInstrInfo.h"
+#include "MCTargetDesc/HexagonMCTargetDesc.h"
+#include "llvm/MC/MCELFStreamer.h"
+#include "HexagonTargetStreamer.h"
+
+namespace llvm {
+
+class HexagonMCELFStreamer : public MCELFStreamer {
+ std::unique_ptr<MCInstrInfo> MCII;
+
+public:
+ HexagonMCELFStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_pwrite_stream &OS, MCCodeEmitter *Emitter)
+ : MCELFStreamer(Context, TAB, OS, Emitter),
+ MCII(createHexagonMCInstrInfo()) {}
+
+ virtual void EmitInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI) override;
+ void EmitSymbol(const MCInst &Inst);
+ void HexagonMCEmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment,
+ unsigned AccessSize);
+ void HexagonMCEmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment, unsigned AccessSize);
+};
+
+MCStreamer *createHexagonELFStreamer(MCContext &Context, MCAsmBackend &MAB,
+ raw_pwrite_stream &OS, MCCodeEmitter *CE);
+
+} // namespace llvm
+
+#endif
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
index 2731278f0e41..e69a52de5c77 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp
@@ -35,6 +35,21 @@ size_t HexagonMCInstrInfo::bundleSize(MCInst const &MCI) {
return (1);
}
+void HexagonMCInstrInfo::clampExtended(MCInstrInfo const &MCII, MCInst &MCI) {
+ assert(HexagonMCInstrInfo::isExtendable(MCII, MCI) ||
+ HexagonMCInstrInfo::isExtended(MCII, MCI));
+ MCOperand &exOp =
+ MCI.getOperand(HexagonMCInstrInfo::getExtendableOp(MCII, MCI));
+ // If the extended value is a constant, then use it for the extended and
+ // for the extender instructions, masking off the lower 6 bits and
+ // including the assumed bits.
+ if (exOp.isImm()) {
+ unsigned Shift = HexagonMCInstrInfo::getExtentAlignment(MCII, MCI);
+ int64_t Bits = exOp.getImm();
+ exOp.setImm((Bits & 0x3f) << Shift);
+ }
+}
+
MCInst *HexagonMCInstrInfo::deriveDuplex(MCContext &Context, unsigned iClass,
MCInst const &inst0,
MCInst const &inst1) {
@@ -446,4 +461,4 @@ void HexagonMCInstrInfo::setOuterLoop(MCInst &MCI) {
MCOperand &Operand = MCI.getOperand(0);
Operand.setImm(Operand.getImm() | outerLoopMask);
}
-}
+} // namespace llvm
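
Not part of the patch: a standalone model of the immediate arithmetic in clampExtended above. When an extender instruction already carries the upper bits of a constant, the extended instruction keeps only the low 6 bits, shifted left by the operand's extent alignment.

#include <cstdint>
#include <cstdio>

// Keep the low 6 bits of the extended value, placed at the operand's
// alignment -- the same (Bits & 0x3f) << Shift the patch applies.
int64_t clampExtendedImm(int64_t Bits, unsigned Shift) {
  return (Bits & 0x3f) << Shift;
}

int main() {
  // 0x12345678 clamps to 0x38 at alignment 0.
  std::printf("0x%llx\n",
              (unsigned long long)clampExtendedImm(0x12345678, 0));
}
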
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h
index 09f305f638e2..9f7562a20063 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h
@@ -41,14 +41,14 @@ int64_t const outerLoopMask = 1 << outerLoopOffset;
size_t const bundleInstructionsOffset = 1;
-// Returns the number of instructions in the bundle
-size_t bundleSize(MCInst const &MCI);
-
// Returns a iterator range of instructions in this bundle
iterator_range<MCInst::const_iterator> bundleInstructions(MCInst const &MCI);
-// Return the extender for instruction at Index or nullptr if none
-MCInst const *extenderForIndex(MCInst const &MCB, size_t Index);
+// Returns the number of instructions in the bundle
+size_t bundleSize(MCInst const &MCI);
+
+// Clamp off upper 26 bits of extendable operand for emission
+void clampExtended(MCInstrInfo const &MCII, MCInst &MCI);
// Create a duplex instruction given the two subinsts
MCInst *deriveDuplex(MCContext &Context, unsigned iClass, MCInst const &inst0,
@@ -57,6 +57,9 @@ MCInst *deriveDuplex(MCContext &Context, unsigned iClass, MCInst const &inst0,
// Convert this instruction in to a duplex subinst
MCInst deriveSubInst(MCInst const &Inst);
+// Return the extender for instruction at Index or nullptr if none
+MCInst const *extenderForIndex(MCInst const &MCB, size_t Index);
+
// Return memory access size
HexagonII::MemAccessSize getAccessSize(MCInstrInfo const &MCII,
MCInst const &MCI);
@@ -224,9 +227,9 @@ void setOuterLoop(MCInst &MCI);
// Would duplexing this instruction create a requirement to extend
bool subInstWouldBeExtended(MCInst const &potentialDuplex);
-// Attempt to find and replace compound pairs
+// Attempt to find and replace compound pairs
void tryCompound(MCInstrInfo const &MCII, MCContext &Context, MCInst &MCI);
-}
-}
+} // namespace HexagonMCInstrInfo
+} // namespace llvm
#endif // LLVM_LIB_TARGET_HEXAGON_MCTARGETDESC_HEXAGONMCINSTRINFO_H
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.h
index a21cce1fc240..9c0e3f2bbf6e 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.h
@@ -60,6 +60,6 @@ bool HexagonMCShuffle(MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
unsigned HexagonMCShuffle(MCInstrInfo const &MCII, MCSubtargetInfo const &STI,
MCContext &Context, MCInst &,
SmallVector<DuplexCandidate, 8>);
-}
+} // namespace llvm
#endif // HEXAGONMCSHUFFLER_H
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
index 43734ed6ca3f..4a4f0c21afa2 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
@@ -12,15 +12,20 @@
//===----------------------------------------------------------------------===//
#include "HexagonMCTargetDesc.h"
+#include "Hexagon.h"
#include "HexagonMCAsmInfo.h"
+#include "HexagonMCELFStreamer.h"
#include "MCTargetDesc/HexagonInstPrinter.h"
#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MachineLocation.h"
+#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
@@ -48,12 +53,92 @@ static MCRegisterInfo *createHexagonMCRegisterInfo(StringRef TT) {
}
static MCSubtargetInfo *
-createHexagonMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS) {
+createHexagonMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
MCSubtargetInfo *X = new MCSubtargetInfo();
InitHexagonMCSubtargetInfo(X, TT, CPU, FS);
return X;
}
+namespace {
+class HexagonTargetAsmStreamer : public HexagonTargetStreamer {
+public:
+ HexagonTargetAsmStreamer(MCStreamer &S,
+ formatted_raw_ostream &, bool,
+ MCInstPrinter &)
+ : HexagonTargetStreamer(S) {}
+ void prettyPrintAsm(MCInstPrinter &InstPrinter, raw_ostream &OS,
+ const MCInst &Inst, const MCSubtargetInfo &STI) override {
+ assert(HexagonMCInstrInfo::isBundle(Inst));
+ assert(HexagonMCInstrInfo::bundleSize(Inst) <= HEXAGON_PACKET_SIZE);
+ std::string Buffer;
+ {
+ raw_string_ostream TempStream(Buffer);
+ InstPrinter.printInst(&Inst, TempStream, "", STI);
+ }
+ StringRef Contents(Buffer);
+ auto PacketBundle = Contents.rsplit('\n');
+ auto HeadTail = PacketBundle.first.split('\n');
+ auto Preamble = "\t{\n\t\t";
+ auto Separator = "";
+ while(!HeadTail.first.empty()) {
+ OS << Separator;
+ StringRef Inst;
+ auto Duplex = HeadTail.first.split('\v');
+ if(!Duplex.second.empty()){
+ OS << Duplex.first << "\n";
+ Inst = Duplex.second;
+ }
+ else {
+ if(!HeadTail.first.startswith("immext"))
+ Inst = Duplex.first;
+ }
+ OS << Preamble;
+ OS << Inst;
+ HeadTail = HeadTail.second.split('\n');
+ Preamble = "";
+ Separator = "\n\t\t";
+ }
+ if(HexagonMCInstrInfo::bundleSize(Inst) != 0)
+ OS << "\n\t}" << PacketBundle.second;
+ }
+};
+} // namespace
+
+namespace {
+class HexagonTargetELFStreamer : public HexagonTargetStreamer {
+public:
+ MCELFStreamer &getStreamer() {
+ return static_cast<MCELFStreamer &>(Streamer);
+ }
+ HexagonTargetELFStreamer(MCStreamer &S, MCSubtargetInfo const &STI)
+ : HexagonTargetStreamer(S) {
+ auto Bits = STI.getFeatureBits();
+ unsigned Flags;
+ if (Bits.to_ullong() & llvm::Hexagon::ArchV5)
+ Flags = ELF::EF_HEXAGON_MACH_V5;
+ else
+ Flags = ELF::EF_HEXAGON_MACH_V4;
+ getStreamer().getAssembler().setELFHeaderEFlags(Flags);
+ }
+ void EmitCommonSymbolSorted(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment,
+ unsigned AccessSize) override {
+ HexagonMCELFStreamer &HexagonELFStreamer =
+ static_cast<HexagonMCELFStreamer &>(getStreamer());
+ HexagonELFStreamer.HexagonMCEmitCommonSymbol(Symbol, Size, ByteAlignment,
+ AccessSize);
+ }
+ void EmitLocalCommonSymbolSorted(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment,
+ unsigned AccessSize) override {
+ HexagonMCELFStreamer &HexagonELFStreamer =
+ static_cast<HexagonMCELFStreamer &>(getStreamer());
+ HexagonELFStreamer.HexagonMCEmitLocalCommonSymbol(
+ Symbol, Size, ByteAlignment, AccessSize);
+ }
+};
+} // namespace
+
static MCAsmInfo *createHexagonMCAsmInfo(const MCRegisterInfo &MRI,
const Triple &TT) {
MCAsmInfo *MAI = new HexagonMCAsmInfo(TT);
@@ -82,9 +167,26 @@ static MCInstPrinter *createHexagonMCInstPrinter(const Triple &T,
const MCInstrInfo &MII,
const MCRegisterInfo &MRI) {
if (SyntaxVariant == 0)
- return(new HexagonInstPrinter(MAI, MII, MRI));
+ return (new HexagonInstPrinter(MAI, MII, MRI));
else
- return nullptr;
+ return nullptr;
+}
+
+MCTargetStreamer *createMCAsmTargetStreamer(
+ MCStreamer &S, formatted_raw_ostream &OS, MCInstPrinter *InstPrint,
+ bool IsVerboseAsm) {
+ return new HexagonTargetAsmStreamer(S, OS, IsVerboseAsm, *InstPrint);
+}
+
+static MCStreamer *createMCStreamer(Triple const &T, MCContext &Context,
+ MCAsmBackend &MAB, raw_pwrite_stream &OS,
+ MCCodeEmitter *Emitter, bool RelaxAll) {
+ return createHexagonELFStreamer(Context, MAB, OS, Emitter);
+}
+
+static MCTargetStreamer *
+createHexagonObjectTargetStreamer(MCStreamer &S, MCSubtargetInfo const &STI) {
+ return new HexagonTargetELFStreamer(S, STI);
}
// Force static initialization.
@@ -116,7 +218,17 @@ extern "C" void LLVMInitializeHexagonTargetMC() {
TargetRegistry::RegisterMCAsmBackend(TheHexagonTarget,
createHexagonAsmBackend);
+ // Register the obj streamer
+ TargetRegistry::RegisterELFStreamer(TheHexagonTarget, createMCStreamer);
+
+ // Register the asm streamer
+ TargetRegistry::RegisterAsmTargetStreamer(TheHexagonTarget,
+ createMCAsmTargetStreamer);
+
// Register the MC Inst Printer
TargetRegistry::RegisterMCInstPrinter(TheHexagonTarget,
createHexagonMCInstPrinter);
+
+ TargetRegistry::RegisterObjectTargetStreamer(
+ TheHexagonTarget, createHexagonObjectTargetStreamer);
}
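
Not part of the patch: a standalone model of the packet layout produced by prettyPrintAsm above, which rewraps the instruction printer's newline-separated output into Hexagon's braced packet syntax. Duplex handling (the '\v' separator) is omitted, and the instruction strings are illustrative.

#include <cstdio>
#include <string>
#include <vector>

// Print a bundle the way the asm target streamer lays it out:
// open brace, one indented instruction per line, closing brace.
void printPacket(const std::vector<std::string> &Insns) {
  std::printf("\t{\n");
  for (const std::string &I : Insns)
    std::printf("\t\t%s\n", I.c_str());
  std::printf("\t}\n");
}

int main() {
  printPacket({"r0 = add(r1, r2)", "memw(r3+#0) = r0"});
}
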
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h
index 81211cc026db..89c3eb3cd65e 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.h
@@ -27,6 +27,7 @@ class MCObjectWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
class Target;
+class Triple;
class StringRef;
class raw_ostream;
class raw_pwrite_stream;
@@ -42,13 +43,13 @@ MCCodeEmitter *createHexagonMCCodeEmitter(MCInstrInfo const &MCII,
MCContext &MCT);
MCAsmBackend *createHexagonAsmBackend(Target const &T,
- MCRegisterInfo const &MRI, StringRef TT,
- StringRef CPU);
+ MCRegisterInfo const &MRI,
+ const Triple &TT, StringRef CPU);
MCObjectWriter *createHexagonELFObjectWriter(raw_pwrite_stream &OS,
uint8_t OSABI, StringRef CPU);
-} // End llvm namespace
+} // namespace llvm
// Define symbolic names for Hexagon registers. This defines a mapping from
// register name to register number.
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.h b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.h
index 9218fd3eb070..53325f6edb7c 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.h
@@ -134,6 +134,6 @@ public:
void setError(unsigned Err) { Error = Err; };
unsigned getError() const { return (Error); };
};
-}
+} // namespace llvm
#endif // HEXAGONSHUFFLER_H
diff --git a/lib/Target/LLVMBuild.txt b/lib/Target/LLVMBuild.txt
index 3af3426b94c1..ab8232489282 100644
--- a/lib/Target/LLVMBuild.txt
+++ b/lib/Target/LLVMBuild.txt
@@ -19,6 +19,7 @@
; will typically require only insertion of a line.
[common]
subdirectories =
+ AMDGPU
ARM
AArch64
BPF
@@ -28,7 +29,6 @@ subdirectories =
NVPTX
Mips
PowerPC
- R600
Sparc
SystemZ
X86
diff --git a/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h b/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h
index 70141a998e4a..80565aab180e 100644
--- a/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h
+++ b/lib/Target/MSP430/InstPrinter/MSP430InstPrinter.h
@@ -40,6 +40,6 @@ namespace llvm {
void printCCOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp b/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
index 6bcfb32b176d..be445c56389a 100644
--- a/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
+++ b/lib/Target/MSP430/MCTargetDesc/MSP430MCTargetDesc.cpp
@@ -43,8 +43,8 @@ static MCRegisterInfo *createMSP430MCRegisterInfo(StringRef TT) {
return X;
}
-static MCSubtargetInfo *createMSP430MCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
+static MCSubtargetInfo *
+createMSP430MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
MCSubtargetInfo *X = new MCSubtargetInfo();
InitMSP430MCSubtargetInfo(X, TT, CPU, FS);
return X;
diff --git a/lib/Target/MSP430/MSP430.h b/lib/Target/MSP430/MSP430.h
index 796f25233123..302012e1b148 100644
--- a/lib/Target/MSP430/MSP430.h
+++ b/lib/Target/MSP430/MSP430.h
@@ -30,7 +30,7 @@ namespace MSP430CC {
COND_INVALID = -1
};
-}
+} // namespace MSP430CC
namespace llvm {
class MSP430TargetMachine;
@@ -42,6 +42,6 @@ namespace llvm {
FunctionPass *createMSP430BranchSelectionPass();
-} // end namespace llvm;
+} // namespace llvm
#endif
diff --git a/lib/Target/MSP430/MSP430BranchSelector.cpp b/lib/Target/MSP430/MSP430BranchSelector.cpp
index ffcf22216d4f..2bc11c07f8ff 100644
--- a/lib/Target/MSP430/MSP430BranchSelector.cpp
+++ b/lib/Target/MSP430/MSP430BranchSelector.cpp
@@ -44,7 +44,7 @@ namespace {
}
};
char MSP430BSel::ID = 0;
-}
+} // namespace
/// createMSP430BranchSelectionPass - returns an instance of the Branch
/// Selection Pass
diff --git a/lib/Target/MSP430/MSP430FrameLowering.h b/lib/Target/MSP430/MSP430FrameLowering.h
index 48c4dc866a63..2f20bbd8ae15 100644
--- a/lib/Target/MSP430/MSP430FrameLowering.h
+++ b/lib/Target/MSP430/MSP430FrameLowering.h
@@ -49,6 +49,6 @@ public:
RegScavenger *RS = nullptr) const override;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 5ce5013d898c..a60108df360c 100644
--- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -85,7 +85,7 @@ namespace {
errs() << " JT" << JT << " Align" << Align << '\n';
}
};
-}
+} // namespace
/// MSP430DAGToDAGISel - MSP430 specific code to select MSP430 machine
/// instructions for SelectionDAG operations.
diff --git a/lib/Target/MSP430/MSP430ISelLowering.h b/lib/Target/MSP430/MSP430ISelLowering.h
index 80d3ae175fb1..b09060939ac5 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.h
+++ b/lib/Target/MSP430/MSP430ISelLowering.h
@@ -64,7 +64,7 @@ namespace llvm {
/// SHL, SRA, SRL - Non-constant shifts.
SHL, SRA, SRL
};
- }
+ } // namespace MSP430ISD
class MSP430Subtarget;
class MSP430TargetLowering : public TargetLowering {
diff --git a/lib/Target/MSP430/MSP430InstrInfo.cpp b/lib/Target/MSP430/MSP430InstrInfo.cpp
index 27681aae6068..72b1780fd1ce 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -262,7 +262,7 @@ bool MSP430InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
unsigned
MSP430InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
diff --git a/lib/Target/MSP430/MSP430InstrInfo.h b/lib/Target/MSP430/MSP430InstrInfo.h
index f9b25b639626..c6bad1eadd65 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.h
+++ b/lib/Target/MSP430/MSP430InstrInfo.h
@@ -38,7 +38,7 @@ namespace MSP430II {
Size4Bytes = 3 << SizeShift,
Size6Bytes = 4 << SizeShift
};
-}
+} // namespace MSP430II
class MSP430InstrInfo : public MSP430GenInstrInfo {
const MSP430RegisterInfo RI;
@@ -82,12 +82,11 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/MSP430/MSP430MCInstLower.h b/lib/Target/MSP430/MSP430MCInstLower.h
index ebd639744bcc..ebbc6e51286e 100644
--- a/lib/Target/MSP430/MSP430MCInstLower.h
+++ b/lib/Target/MSP430/MSP430MCInstLower.h
@@ -42,6 +42,6 @@ public:
MCSymbol *GetBlockAddressSymbol(const MachineOperand &MO) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/MSP430/MSP430MachineFunctionInfo.h b/lib/Target/MSP430/MSP430MachineFunctionInfo.h
index fcc5f5b88600..3d1a245c4fea 100644
--- a/lib/Target/MSP430/MSP430MachineFunctionInfo.h
+++ b/lib/Target/MSP430/MSP430MachineFunctionInfo.h
@@ -49,6 +49,6 @@ public:
void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/MSP430/MSP430SelectionDAGInfo.h b/lib/Target/MSP430/MSP430SelectionDAGInfo.h
index 61a6b19111db..95c929372a7f 100644
--- a/lib/Target/MSP430/MSP430SelectionDAGInfo.h
+++ b/lib/Target/MSP430/MSP430SelectionDAGInfo.h
@@ -26,6 +26,6 @@ public:
~MSP430SelectionDAGInfo();
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/MSP430/MSP430Subtarget.cpp b/lib/Target/MSP430/MSP430Subtarget.cpp
index 3dda3bf95e5e..6374f41c00ea 100644
--- a/lib/Target/MSP430/MSP430Subtarget.cpp
+++ b/lib/Target/MSP430/MSP430Subtarget.cpp
@@ -31,7 +31,7 @@ MSP430Subtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
return *this;
}
-MSP430Subtarget::MSP430Subtarget(const std::string &TT, const std::string &CPU,
+MSP430Subtarget::MSP430Subtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM)
: MSP430GenSubtargetInfo(TT, CPU, FS), FrameLowering(),
InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
diff --git a/lib/Target/MSP430/MSP430Subtarget.h b/lib/Target/MSP430/MSP430Subtarget.h
index 30d46d389ee5..958a5d39487d 100644
--- a/lib/Target/MSP430/MSP430Subtarget.h
+++ b/lib/Target/MSP430/MSP430Subtarget.h
@@ -41,7 +41,7 @@ public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
- MSP430Subtarget(const std::string &TT, const std::string &CPU,
+ MSP430Subtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM);
MSP430Subtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
@@ -64,6 +64,6 @@ public:
return &TSInfo;
}
};
-} // End llvm namespace
+} // namespace llvm
#endif // LLVM_TARGET_MSP430_SUBTARGET_H
diff --git a/lib/Target/MSP430/MSP430TargetMachine.cpp b/lib/Target/MSP430/MSP430TargetMachine.cpp
index d6cc4ae5ecd4..97a4047d1d63 100644
--- a/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -25,7 +25,7 @@ extern "C" void LLVMInitializeMSP430Target() {
RegisterTargetMachine<MSP430TargetMachine> X(TheMSP430Target);
}
-MSP430TargetMachine::MSP430TargetMachine(const Target &T, StringRef TT,
+MSP430TargetMachine::MSP430TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
diff --git a/lib/Target/MSP430/MSP430TargetMachine.h b/lib/Target/MSP430/MSP430TargetMachine.h
index 6ccd30d393fa..4f955a8049c7 100644
--- a/lib/Target/MSP430/MSP430TargetMachine.h
+++ b/lib/Target/MSP430/MSP430TargetMachine.h
@@ -28,8 +28,8 @@ class MSP430TargetMachine : public LLVMTargetMachine {
MSP430Subtarget Subtarget;
public:
- MSP430TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
+ MSP430TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
~MSP430TargetMachine() override;
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 9c054e5ac231..5b8d633554b8 100644
--- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -186,6 +186,10 @@ class MipsAsmParser : public MCTargetAsmParser {
bool Is32BitImm, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
+ bool loadAndAddSymbolAddress(const MCExpr *SymExpr, unsigned DstReg,
+ unsigned SrcReg, bool Is32BitSym, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions);
+
bool expandLoadImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
@@ -197,10 +201,6 @@ class MipsAsmParser : public MCTargetAsmParser {
bool expandUncondBranchMMPseudo(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
- void expandLoadAddressSym(const MCOperand &DstRegOp, const MCOperand &SymOp,
- bool Is32BitSym, SMLoc IDLoc,
- SmallVectorImpl<MCInst> &Instructions);
-
void expandMemInst(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions, bool isLoad,
bool isImmOpnd);
@@ -208,6 +208,12 @@ class MipsAsmParser : public MCTargetAsmParser {
bool expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
+ bool expandBranchImm(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions);
+
+ bool expandCondBranches(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions);
+
void createNop(bool hasShortDelaySlot, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
@@ -879,6 +885,9 @@ public:
bool isConstantImm() const {
return isImm() && dyn_cast<MCConstantExpr>(getImm());
}
+ template <unsigned Bits> bool isUImm() const {
+ return isImm() && isConstantImm() && isUInt<Bits>(getConstantImm());
+ }
bool isToken() const override {
// Note: It's not possible to pretend that other operand kinds are tokens.
// The matcher emitter checks tokens first.
@@ -1616,6 +1625,16 @@ bool MipsAsmParser::needsExpansion(MCInst &Inst) {
case Mips::SWM_MM:
case Mips::JalOneReg:
case Mips::JalTwoReg:
+ case Mips::BneImm:
+ case Mips::BeqImm:
+ case Mips::BLT:
+ case Mips::BLE:
+ case Mips::BGE:
+ case Mips::BGT:
+ case Mips::BLTU:
+ case Mips::BLEU:
+ case Mips::BGEU:
+ case Mips::BGTU:
return true;
default:
return false;
@@ -1642,6 +1661,18 @@ bool MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc,
case Mips::JalOneReg:
case Mips::JalTwoReg:
return expandJalWithRegs(Inst, IDLoc, Instructions);
+ case Mips::BneImm:
+ case Mips::BeqImm:
+ return expandBranchImm(Inst, IDLoc, Instructions);
+ case Mips::BLT:
+ case Mips::BLE:
+ case Mips::BGE:
+ case Mips::BGT:
+ case Mips::BLTU:
+ case Mips::BLEU:
+ case Mips::BGEU:
+ case Mips::BGTU:
+ return expandCondBranches(Inst, IDLoc, Instructions);
}
}
@@ -1898,15 +1929,20 @@ MipsAsmParser::expandLoadAddressReg(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
const MCOperand &DstRegOp = Inst.getOperand(0);
assert(DstRegOp.isReg() && "expected register operand kind");
+ const MCOperand &SrcRegOp = Inst.getOperand(1);
+ assert(SrcRegOp.isReg() && "expected register operand kind");
+
const MCOperand &ImmOp = Inst.getOperand(2);
assert((ImmOp.isImm() || ImmOp.isExpr()) &&
"expected immediate operand kind");
if (!ImmOp.isImm()) {
- expandLoadAddressSym(DstRegOp, ImmOp, Is32BitImm, IDLoc, Instructions);
+ if (loadAndAddSymbolAddress(ImmOp.getExpr(), DstRegOp.getReg(),
+ SrcRegOp.getReg(), Is32BitImm, IDLoc,
+ Instructions))
+ return true;
+
return false;
}
- const MCOperand &SrcRegOp = Inst.getOperand(1);
- assert(SrcRegOp.isReg() && "expected register operand kind");
if (loadImmediate(ImmOp.getImm(), DstRegOp.getReg(), SrcRegOp.getReg(),
Is32BitImm, IDLoc, Instructions))
@@ -1925,7 +1961,11 @@ MipsAsmParser::expandLoadAddressImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
assert((ImmOp.isImm() || ImmOp.isExpr()) &&
"expected immediate operand kind");
if (!ImmOp.isImm()) {
- expandLoadAddressSym(DstRegOp, ImmOp, Is32BitImm, IDLoc, Instructions);
+ if (loadAndAddSymbolAddress(ImmOp.getExpr(), DstRegOp.getReg(),
+ Mips::NoRegister, Is32BitImm, IDLoc,
+ Instructions))
+ return true;
+
return false;
}
@@ -1936,8 +1976,8 @@ MipsAsmParser::expandLoadAddressImm(MCInst &Inst, bool Is32BitImm, SMLoc IDLoc,
return false;
}
-void MipsAsmParser::expandLoadAddressSym(
- const MCOperand &DstRegOp, const MCOperand &SymOp, bool Is32BitSym,
+bool MipsAsmParser::loadAndAddSymbolAddress(
+ const MCExpr *SymExpr, unsigned DstReg, unsigned SrcReg, bool Is32BitSym,
SMLoc IDLoc, SmallVectorImpl<MCInst> &Instructions) {
warnIfNoMacro(IDLoc);
@@ -1945,14 +1985,12 @@ void MipsAsmParser::expandLoadAddressSym(
Warning(IDLoc, "instruction loads the 32-bit address of a 64-bit symbol");
MCInst tmpInst;
- unsigned RegNo = DstRegOp.getReg();
- const MCSymbolRefExpr *Symbol = cast<MCSymbolRefExpr>(SymOp.getExpr());
- const MCSymbolRefExpr *HiExpr =
- MCSymbolRefExpr::create(Symbol->getSymbol().getName(),
- MCSymbolRefExpr::VK_Mips_ABS_HI, getContext());
- const MCSymbolRefExpr *LoExpr =
- MCSymbolRefExpr::create(Symbol->getSymbol().getName(),
- MCSymbolRefExpr::VK_Mips_ABS_LO, getContext());
+ const MCSymbolRefExpr *Symbol = cast<MCSymbolRefExpr>(SymExpr);
+ const MCSymbolRefExpr *HiExpr = MCSymbolRefExpr::create(
+ &Symbol->getSymbol(), MCSymbolRefExpr::VK_Mips_ABS_HI, getContext());
+ const MCSymbolRefExpr *LoExpr = MCSymbolRefExpr::create(
+ &Symbol->getSymbol(), MCSymbolRefExpr::VK_Mips_ABS_LO, getContext());
+
if (!Is32BitSym) {
// If it's a 64-bit architecture, expand to:
// la d,sym => lui d,highest(sym)
@@ -1961,36 +1999,39 @@ void MipsAsmParser::expandLoadAddressSym(
// ori d,d,hi16(sym)
// dsll d,d,16
// ori d,d,lo16(sym)
- const MCSymbolRefExpr *HighestExpr =
- MCSymbolRefExpr::create(Symbol->getSymbol().getName(),
- MCSymbolRefExpr::VK_Mips_HIGHEST, getContext());
- const MCSymbolRefExpr *HigherExpr =
- MCSymbolRefExpr::create(Symbol->getSymbol().getName(),
- MCSymbolRefExpr::VK_Mips_HIGHER, getContext());
+ const MCSymbolRefExpr *HighestExpr = MCSymbolRefExpr::create(
+ &Symbol->getSymbol(), MCSymbolRefExpr::VK_Mips_HIGHEST, getContext());
+ const MCSymbolRefExpr *HigherExpr = MCSymbolRefExpr::create(
+ &Symbol->getSymbol(), MCSymbolRefExpr::VK_Mips_HIGHER, getContext());
tmpInst.setOpcode(Mips::LUi);
- tmpInst.addOperand(MCOperand::createReg(RegNo));
+ tmpInst.addOperand(MCOperand::createReg(DstReg));
tmpInst.addOperand(MCOperand::createExpr(HighestExpr));
Instructions.push_back(tmpInst);
- createLShiftOri<0>(MCOperand::createExpr(HigherExpr), RegNo, SMLoc(),
+ createLShiftOri<0>(MCOperand::createExpr(HigherExpr), DstReg, SMLoc(),
Instructions);
- createLShiftOri<16>(MCOperand::createExpr(HiExpr), RegNo, SMLoc(),
+ createLShiftOri<16>(MCOperand::createExpr(HiExpr), DstReg, SMLoc(),
Instructions);
- createLShiftOri<16>(MCOperand::createExpr(LoExpr), RegNo, SMLoc(),
+ createLShiftOri<16>(MCOperand::createExpr(LoExpr), DstReg, SMLoc(),
Instructions);
} else {
// Otherwise, expand to:
// la d,sym => lui d,hi16(sym)
// ori d,d,lo16(sym)
tmpInst.setOpcode(Mips::LUi);
- tmpInst.addOperand(MCOperand::createReg(RegNo));
+ tmpInst.addOperand(MCOperand::createReg(DstReg));
tmpInst.addOperand(MCOperand::createExpr(HiExpr));
Instructions.push_back(tmpInst);
- createLShiftOri<0>(MCOperand::createExpr(LoExpr), RegNo, SMLoc(),
+ createLShiftOri<0>(MCOperand::createExpr(LoExpr), DstReg, SMLoc(),
Instructions);
}
+
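+  // A sketch of the register variant (registers illustrative): for
+  // "la d, sym(s)", an "addu d, d, s" is appended to the sequence above.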
+ if (SrcReg != Mips::NoRegister)
+ createAddu(DstReg, DstReg, SrcReg, Instructions);
+
+ return false;
}
bool MipsAsmParser::expandUncondBranchMMPseudo(
@@ -2032,10 +2073,62 @@ bool MipsAsmParser::expandUncondBranchMMPseudo(
return false;
}
+bool MipsAsmParser::expandBranchImm(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions) {
+ const MCOperand &DstRegOp = Inst.getOperand(0);
+ assert(DstRegOp.isReg() && "expected register operand kind");
+
+ const MCOperand &ImmOp = Inst.getOperand(1);
+ assert(ImmOp.isImm() && "expected immediate operand kind");
+
+ const MCOperand &MemOffsetOp = Inst.getOperand(2);
+ assert(MemOffsetOp.isImm() && "expected immediate operand kind");
+
+ unsigned OpCode = 0;
+  switch (Inst.getOpcode()) {
+ case Mips::BneImm:
+ OpCode = Mips::BNE;
+ break;
+ case Mips::BeqImm:
+ OpCode = Mips::BEQ;
+ break;
+ default:
+ llvm_unreachable("Unknown immediate branch pseudo-instruction.");
+ break;
+ }
+
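+  // A sketch of the resulting expansion (register and immediate values are
+  // illustrative):
+  //   bne $2, 0, offset   => bne $2, $zero, offset
+  //   bne $2, 123, offset => li  $at, 123
+  //                          bne $2, $at, offset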
+ int64_t ImmValue = ImmOp.getImm();
+ if (ImmValue == 0) {
+ MCInst BranchInst;
+ BranchInst.setOpcode(OpCode);
+ BranchInst.addOperand(DstRegOp);
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MemOffsetOp);
+ Instructions.push_back(BranchInst);
+ } else {
+ warnIfNoMacro(IDLoc);
+
+ unsigned ATReg = getATReg(IDLoc);
+ if (!ATReg)
+ return true;
+
+ if (loadImmediate(ImmValue, ATReg, Mips::NoRegister, !isGP64bit(), IDLoc,
+ Instructions))
+ return true;
+
+ MCInst BranchInst;
+ BranchInst.setOpcode(OpCode);
+ BranchInst.addOperand(DstRegOp);
+ BranchInst.addOperand(MCOperand::createReg(ATReg));
+ BranchInst.addOperand(MemOffsetOp);
+ Instructions.push_back(BranchInst);
+ }
+ return false;
+}
+
void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions,
bool isLoad, bool isImmOpnd) {
- const MCSymbolRefExpr *SR;
MCInst TempInst;
unsigned ImmOffset, HiOffset, LoOffset;
const MCExpr *ExprOffset;
@@ -2102,16 +2195,8 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
if (isImmOpnd)
TempInst.addOperand(MCOperand::createImm(HiOffset));
else {
- if (ExprOffset->getKind() == MCExpr::SymbolRef) {
- SR = static_cast<const MCSymbolRefExpr *>(ExprOffset);
- const MCSymbolRefExpr *HiExpr = MCSymbolRefExpr::create(
- SR->getSymbol().getName(), MCSymbolRefExpr::VK_Mips_ABS_HI,
- getContext());
- TempInst.addOperand(MCOperand::createExpr(HiExpr));
- } else {
- const MCExpr *HiExpr = evaluateRelocExpr(ExprOffset, "hi");
- TempInst.addOperand(MCOperand::createExpr(HiExpr));
- }
+ const MCExpr *HiExpr = evaluateRelocExpr(ExprOffset, "hi");
+ TempInst.addOperand(MCOperand::createExpr(HiExpr));
}
// Add the instruction to the list.
Instructions.push_back(TempInst);
@@ -2134,15 +2219,8 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
if (isImmOpnd)
TempInst.addOperand(MCOperand::createImm(LoOffset));
else {
- if (ExprOffset->getKind() == MCExpr::SymbolRef) {
- const MCSymbolRefExpr *LoExpr = MCSymbolRefExpr::create(
- SR->getSymbol().getName(), MCSymbolRefExpr::VK_Mips_ABS_LO,
- getContext());
- TempInst.addOperand(MCOperand::createExpr(LoExpr));
- } else {
- const MCExpr *LoExpr = evaluateRelocExpr(ExprOffset, "lo");
- TempInst.addOperand(MCOperand::createExpr(LoExpr));
- }
+ const MCExpr *LoExpr = evaluateRelocExpr(ExprOffset, "lo");
+ TempInst.addOperand(MCOperand::createExpr(LoExpr));
}
Instructions.push_back(TempInst);
TempInst.clear();
@@ -2171,6 +2249,206 @@ MipsAsmParser::expandLoadStoreMultiple(MCInst &Inst, SMLoc IDLoc,
return false;
}
+bool MipsAsmParser::expandCondBranches(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions) {
+ unsigned PseudoOpcode = Inst.getOpcode();
+ unsigned SrcReg = Inst.getOperand(0).getReg();
+ unsigned TrgReg = Inst.getOperand(1).getReg();
+ const MCExpr *OffsetExpr = Inst.getOperand(2).getExpr();
+
+ unsigned ZeroSrcOpcode, ZeroTrgOpcode;
+ bool ReverseOrderSLT, IsUnsigned, AcceptsEquality;
+
+ switch (PseudoOpcode) {
+ case Mips::BLT:
+ case Mips::BLTU:
+ AcceptsEquality = false;
+ ReverseOrderSLT = false;
+ IsUnsigned = (PseudoOpcode == Mips::BLTU);
+ ZeroSrcOpcode = Mips::BGTZ;
+ ZeroTrgOpcode = Mips::BLTZ;
+ break;
+ case Mips::BLE:
+ case Mips::BLEU:
+ AcceptsEquality = true;
+ ReverseOrderSLT = true;
+ IsUnsigned = (PseudoOpcode == Mips::BLEU);
+ ZeroSrcOpcode = Mips::BGEZ;
+ ZeroTrgOpcode = Mips::BLEZ;
+ break;
+ case Mips::BGE:
+ case Mips::BGEU:
+ AcceptsEquality = true;
+ ReverseOrderSLT = false;
+ IsUnsigned = (PseudoOpcode == Mips::BGEU);
+ ZeroSrcOpcode = Mips::BLEZ;
+ ZeroTrgOpcode = Mips::BGEZ;
+ break;
+ case Mips::BGT:
+ case Mips::BGTU:
+ AcceptsEquality = false;
+ ReverseOrderSLT = true;
+ IsUnsigned = (PseudoOpcode == Mips::BGTU);
+ ZeroSrcOpcode = Mips::BLTZ;
+ ZeroTrgOpcode = Mips::BGTZ;
+ break;
+ default:
+ llvm_unreachable("unknown opcode for branch pseudo-instruction");
+ }
+
+ MCInst BranchInst;
+ bool IsTrgRegZero = (TrgReg == Mips::ZERO);
+ bool IsSrcRegZero = (SrcReg == Mips::ZERO);
+ if (IsSrcRegZero && IsTrgRegZero) {
+    // FIXME: All of these opcode-specific cases are needed for compatibility
+ // with GAS' behaviour. However, they may not generate the most efficient
+ // code in some circumstances.
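+    // For example (a sketch): "blt $zero, $zero, offset" is emitted as
+    // "bltz $zero, offset" to match GAS, even though it can never be taken.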
+ if (PseudoOpcode == Mips::BLT) {
+ BranchInst.setOpcode(Mips::BLTZ);
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createExpr(OffsetExpr));
+ Instructions.push_back(BranchInst);
+ return false;
+ }
+ if (PseudoOpcode == Mips::BLE) {
+ BranchInst.setOpcode(Mips::BLEZ);
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createExpr(OffsetExpr));
+ Instructions.push_back(BranchInst);
+ Warning(IDLoc, "branch is always taken");
+ return false;
+ }
+ if (PseudoOpcode == Mips::BGE) {
+ BranchInst.setOpcode(Mips::BGEZ);
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createExpr(OffsetExpr));
+ Instructions.push_back(BranchInst);
+ Warning(IDLoc, "branch is always taken");
+ return false;
+ }
+ if (PseudoOpcode == Mips::BGT) {
+ BranchInst.setOpcode(Mips::BGTZ);
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createExpr(OffsetExpr));
+ Instructions.push_back(BranchInst);
+ return false;
+ }
+ if (PseudoOpcode == Mips::BGTU) {
+ BranchInst.setOpcode(Mips::BNE);
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createExpr(OffsetExpr));
+ Instructions.push_back(BranchInst);
+ return false;
+ }
+ if (AcceptsEquality) {
+ // If both registers are $0 and the pseudo-branch accepts equality, it
+ // will always be taken, so we emit an unconditional branch.
+ BranchInst.setOpcode(Mips::BEQ);
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createExpr(OffsetExpr));
+ Instructions.push_back(BranchInst);
+ Warning(IDLoc, "branch is always taken");
+ return false;
+ }
+ // If both registers are $0 and the pseudo-branch does not accept
+ // equality, it will never be taken, so we don't have to emit anything.
+ return false;
+ }
+ if (IsSrcRegZero || IsTrgRegZero) {
+ if ((IsSrcRegZero && PseudoOpcode == Mips::BGTU) ||
+ (IsTrgRegZero && PseudoOpcode == Mips::BLTU)) {
+ // If the $rs is $0 and the pseudo-branch is BGTU (0 > x) or
+ // if the $rt is $0 and the pseudo-branch is BLTU (x < 0),
+ // the pseudo-branch will never be taken, so we don't emit anything.
+ // This only applies to unsigned pseudo-branches.
+ return false;
+ }
+ if ((IsSrcRegZero && PseudoOpcode == Mips::BLEU) ||
+ (IsTrgRegZero && PseudoOpcode == Mips::BGEU)) {
+ // If the $rs is $0 and the pseudo-branch is BLEU (0 <= x) or
+ // if the $rt is $0 and the pseudo-branch is BGEU (x >= 0),
+ // the pseudo-branch will always be taken, so we emit an unconditional
+ // branch.
+ // This only applies to unsigned pseudo-branches.
+ BranchInst.setOpcode(Mips::BEQ);
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createExpr(OffsetExpr));
+ Instructions.push_back(BranchInst);
+ Warning(IDLoc, "branch is always taken");
+ return false;
+ }
+ if (IsUnsigned) {
+ // If the $rs is $0 and the pseudo-branch is BLTU (0 < x) or
+ // if the $rt is $0 and the pseudo-branch is BGTU (x > 0),
+ // the pseudo-branch will be taken only when the non-zero register is
+ // different from 0, so we emit a BNEZ.
+ //
+ // If the $rs is $0 and the pseudo-branch is BGEU (0 >= x) or
+ // if the $rt is $0 and the pseudo-branch is BLEU (x <= 0),
+ // the pseudo-branch will be taken only when the non-zero register is
+ // equal to 0, so we emit a BEQZ.
+ //
+ // Because only BLEU and BGEU branch on equality, we can use the
+ // AcceptsEquality variable to decide when to emit the BEQZ.
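+      //
+      // For example (a sketch; $4 stands for the non-zero register):
+      //   bltu $zero, $4, offset => bne $4, $zero, offset
+      //   bleu $4, $zero, offset => beq $4, $zero, offset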
+ BranchInst.setOpcode(AcceptsEquality ? Mips::BEQ : Mips::BNE);
+ BranchInst.addOperand(
+ MCOperand::createReg(IsSrcRegZero ? TrgReg : SrcReg));
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createExpr(OffsetExpr));
+ Instructions.push_back(BranchInst);
+ return false;
+ }
+ // If we have a signed pseudo-branch and one of the registers is $0,
+ // we can use an appropriate compare-to-zero branch. We select which one
+ // to use in the switch statement above.
+ BranchInst.setOpcode(IsSrcRegZero ? ZeroSrcOpcode : ZeroTrgOpcode);
+ BranchInst.addOperand(MCOperand::createReg(IsSrcRegZero ? TrgReg : SrcReg));
+ BranchInst.addOperand(MCOperand::createExpr(OffsetExpr));
+ Instructions.push_back(BranchInst);
+ return false;
+ }
+
+  // If neither the SrcReg nor the TrgReg is $0, we need AT to perform the
+  // expansions. If it is not available, we return an error.
+ unsigned ATRegNum = getATReg(IDLoc);
+ if (!ATRegNum)
+ return true;
+
+ warnIfNoMacro(IDLoc);
+
+ // SLT fits well with 2 of our 4 pseudo-branches:
+ // BLT, where $rs < $rt, translates into "slt $at, $rs, $rt" and
+ // BGT, where $rs > $rt, translates into "slt $at, $rt, $rs".
+ // If the result of the SLT is 1, we branch, and if it's 0, we don't.
+ // This is accomplished by using a BNEZ with the result of the SLT.
+ //
+ // The other 2 pseudo-branches are opposites of the above 2 (BGE with BLT
+  // and BLE with BGT), so we change the BNEZ into a BEQZ.
+ // Because only BGE and BLE branch on equality, we can use the
+ // AcceptsEquality variable to decide when to emit the BEQZ.
+ // Note that the order of the SLT arguments doesn't change between
+ // opposites.
+ //
+ // The same applies to the unsigned variants, except that SLTu is used
+ // instead of SLT.
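+  //
+  // For example (a sketch; $4 and $5 stand for $rs and $rt):
+  //   blt  $4, $5, offset => slt  $at, $4, $5
+  //                          bne  $at, $zero, offset
+  //   bleu $4, $5, offset => sltu $at, $5, $4
+  //                          beq  $at, $zero, offset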
+ MCInst SetInst;
+ SetInst.setOpcode(IsUnsigned ? Mips::SLTu : Mips::SLT);
+ SetInst.addOperand(MCOperand::createReg(ATRegNum));
+ SetInst.addOperand(MCOperand::createReg(ReverseOrderSLT ? TrgReg : SrcReg));
+ SetInst.addOperand(MCOperand::createReg(ReverseOrderSLT ? SrcReg : TrgReg));
+ Instructions.push_back(SetInst);
+
+ BranchInst.setOpcode(AcceptsEquality ? Mips::BEQ : Mips::BNE);
+ BranchInst.addOperand(MCOperand::createReg(ATRegNum));
+ BranchInst.addOperand(MCOperand::createReg(Mips::ZERO));
+ BranchInst.addOperand(MCOperand::createExpr(OffsetExpr));
+ Instructions.push_back(BranchInst);
+ return false;
+}
+
void MipsAsmParser::createNop(bool hasShortDelaySlot, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions) {
MCInst NopInst;
@@ -2572,7 +2850,7 @@ const MCExpr *MipsAsmParser::evaluateRelocExpr(const MCExpr *Expr,
if (const MCSymbolRefExpr *MSRE = dyn_cast<MCSymbolRefExpr>(Expr)) {
// It's a symbol, create a symbolic expression from the symbol.
- StringRef Symbol = MSRE->getSymbol().getName();
+ const MCSymbol *Symbol = &MSRE->getSymbol();
MCSymbolRefExpr::VariantKind VK = getVariantKind(RelocStr);
Res = MCSymbolRefExpr::create(Symbol, VK, getContext());
return Res;
diff --git a/lib/Target/Mips/LLVMBuild.txt b/lib/Target/Mips/LLVMBuild.txt
index 0e8d902c56d2..06af8a10a4d2 100644
--- a/lib/Target/Mips/LLVMBuild.txt
+++ b/lib/Target/Mips/LLVMBuild.txt
@@ -31,5 +31,16 @@ has_jit = 1
type = Library
name = MipsCodeGen
parent = Mips
-required_libraries = Analysis AsmPrinter CodeGen Core MC MipsAsmPrinter MipsDesc MipsInfo SelectionDAG Support Target
+required_libraries =
+ Analysis
+ AsmPrinter
+ CodeGen
+ Core
+ MC
+ MipsAsmPrinter
+ MipsDesc
+ MipsInfo
+ SelectionDAG
+ Support
+ Target
add_to_library_groups = Mips
diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp
index 70b9cca8cf6e..725ea7f971eb 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.cpp
@@ -66,4 +66,4 @@ MCStreamer &operator<<(MCStreamer &OS, MipsABIFlagsSection &ABIFlagsSection) {
OS.EmitIntValue(ABIFlagsSection.getFlags2Value(), 4); // flags2
return OS;
}
-}
+} // namespace llvm
diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
index b078cd30a87b..bf306ee4814b 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h
@@ -186,6 +186,6 @@ public:
};
MCStreamer &operator<<(MCStreamer &OS, MipsABIFlagsSection &ABIFlagsSection);
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp
index bf8f7d12880d..8e6c9e69b223 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp
@@ -47,7 +47,7 @@ unsigned MipsABIInfo::GetCalleeAllocdArgSizeInBytes(CallingConv::ID CC) const {
llvm_unreachable("Unhandled ABI");
}
-MipsABIInfo MipsABIInfo::computeTargetABI(Triple TT, StringRef CPU,
+MipsABIInfo MipsABIInfo::computeTargetABI(const Triple &TT, StringRef CPU,
const MCTargetOptions &Options) {
if (Options.getABIName().startswith("o32"))
return MipsABIInfo::O32();
diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIInfo.h b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.h
index d20dc9037951..aa965e82a6bf 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsABIInfo.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.h
@@ -36,7 +36,7 @@ public:
static MipsABIInfo N32() { return MipsABIInfo(ABI::N32); }
static MipsABIInfo N64() { return MipsABIInfo(ABI::N64); }
static MipsABIInfo EABI() { return MipsABIInfo(ABI::EABI); }
- static MipsABIInfo computeTargetABI(Triple TT, StringRef CPU,
+ static MipsABIInfo computeTargetABI(const Triple &TT, StringRef CPU,
const MCTargetOptions &Options);
bool IsKnown() const { return ThisABI != ABI::Unknown; }
@@ -73,6 +73,6 @@ public:
unsigned GetEhDataReg(unsigned I) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
index d823ffca8bb7..5c746b2894b2 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -417,32 +417,27 @@ void MipsAsmBackend::processFixupValue(const MCAssembler &Asm,
// MCAsmBackend
MCAsmBackend *llvm::createMipsAsmBackendEL32(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT,
- StringRef CPU) {
- return new MipsAsmBackend(T, Triple(TT).getOS(),
- /*IsLittle*/true, /*Is64Bit*/false);
+ const Triple &TT, StringRef CPU) {
+ return new MipsAsmBackend(T, TT.getOS(), /*IsLittle*/ true,
+ /*Is64Bit*/ false);
}
MCAsmBackend *llvm::createMipsAsmBackendEB32(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT,
- StringRef CPU) {
- return new MipsAsmBackend(T, Triple(TT).getOS(),
- /*IsLittle*/false, /*Is64Bit*/false);
+ const Triple &TT, StringRef CPU) {
+ return new MipsAsmBackend(T, TT.getOS(), /*IsLittle*/ false,
+ /*Is64Bit*/ false);
}
MCAsmBackend *llvm::createMipsAsmBackendEL64(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT,
- StringRef CPU) {
- return new MipsAsmBackend(T, Triple(TT).getOS(),
- /*IsLittle*/true, /*Is64Bit*/true);
+ const Triple &TT, StringRef CPU) {
+ return new MipsAsmBackend(T, TT.getOS(), /*IsLittle*/ true, /*Is64Bit*/ true);
}
MCAsmBackend *llvm::createMipsAsmBackendEB64(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT,
- StringRef CPU) {
- return new MipsAsmBackend(T, Triple(TT).getOS(),
- /*IsLittle*/false, /*Is64Bit*/true);
+ const Triple &TT, StringRef CPU) {
+ return new MipsAsmBackend(T, TT.getOS(), /*IsLittle*/ false,
+ /*Is64Bit*/ true);
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
index b3d5a4964f86..fe84e4021d34 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
@@ -87,6 +87,6 @@ public:
}; // class MipsAsmBackend
-} // namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
index ff7779ec1e78..a7d5a1e75e41 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h
@@ -119,7 +119,7 @@ namespace MipsII {
FormMask = 15
};
-}
-}
+} // namespace MipsII
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
index 982a7f54e825..a45e2ad8cf16 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp
@@ -51,7 +51,7 @@ struct MipsRelocationEntry {
virtual void sortRelocs(const MCAssembler &Asm,
std::vector<ELFRelocationEntry> &Relocs) override;
};
-}
+} // namespace
MipsELFObjectWriter::MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI,
bool _isN64, bool IsLittleEndian)
@@ -64,13 +64,47 @@ MipsELFObjectWriter::~MipsELFObjectWriter() {}
unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
- // determine the type of the relocation
+ // Determine the type of the relocation.
unsigned Kind = (unsigned)Fixup.getKind();
switch (Kind) {
+ case Mips::fixup_Mips_16:
+ case FK_Data_2:
+ return IsPCRel ? ELF::R_MIPS_PC16 : ELF::R_MIPS_16;
case Mips::fixup_Mips_32:
case FK_Data_4:
return IsPCRel ? ELF::R_MIPS_PC32 : ELF::R_MIPS_32;
+ }
+
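+  // The fixup kinds above may be either PC-relative or absolute; from here
+  // on, the PC-relative and absolute kinds are handled by disjoint switches.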
+ if (IsPCRel) {
+ switch (Kind) {
+ case Mips::fixup_Mips_Branch_PCRel:
+ case Mips::fixup_Mips_PC16:
+ return ELF::R_MIPS_PC16;
+ case Mips::fixup_MICROMIPS_PC7_S1:
+ return ELF::R_MICROMIPS_PC7_S1;
+ case Mips::fixup_MICROMIPS_PC10_S1:
+ return ELF::R_MICROMIPS_PC10_S1;
+ case Mips::fixup_MICROMIPS_PC16_S1:
+ return ELF::R_MICROMIPS_PC16_S1;
+ case Mips::fixup_MIPS_PC19_S2:
+ return ELF::R_MIPS_PC19_S2;
+ case Mips::fixup_MIPS_PC18_S3:
+ return ELF::R_MIPS_PC18_S3;
+ case Mips::fixup_MIPS_PC21_S2:
+ return ELF::R_MIPS_PC21_S2;
+ case Mips::fixup_MIPS_PC26_S2:
+ return ELF::R_MIPS_PC26_S2;
+ case Mips::fixup_MIPS_PCHI16:
+ return ELF::R_MIPS_PCHI16;
+ case Mips::fixup_MIPS_PCLO16:
+ return ELF::R_MIPS_PCLO16;
+ }
+
+ llvm_unreachable("invalid PC-relative fixup kind!");
+ }
+
+ switch (Kind) {
case Mips::fixup_Mips_64:
case FK_Data_8:
return ELF::R_MIPS_64;
@@ -110,9 +144,6 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
return ELF::R_MIPS_TLS_DTPREL_HI16;
case Mips::fixup_Mips_DTPREL_LO:
return ELF::R_MIPS_TLS_DTPREL_LO16;
- case Mips::fixup_Mips_Branch_PCRel:
- case Mips::fixup_Mips_PC16:
- return ELF::R_MIPS_PC16;
case Mips::fixup_Mips_GOT_PAGE:
return ELF::R_MIPS_GOT_PAGE;
case Mips::fixup_Mips_GOT_OFST:
@@ -153,12 +184,6 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
return ELF::R_MICROMIPS_LO16;
case Mips::fixup_MICROMIPS_GOT16:
return ELF::R_MICROMIPS_GOT16;
- case Mips::fixup_MICROMIPS_PC7_S1:
- return ELF::R_MICROMIPS_PC7_S1;
- case Mips::fixup_MICROMIPS_PC10_S1:
- return ELF::R_MICROMIPS_PC10_S1;
- case Mips::fixup_MICROMIPS_PC16_S1:
- return ELF::R_MICROMIPS_PC16_S1;
case Mips::fixup_MICROMIPS_CALL16:
return ELF::R_MICROMIPS_CALL16;
case Mips::fixup_MICROMIPS_GOT_DISP:
@@ -179,19 +204,8 @@ unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target,
return ELF::R_MICROMIPS_TLS_TPREL_HI16;
case Mips::fixup_MICROMIPS_TLS_TPREL_LO16:
return ELF::R_MICROMIPS_TLS_TPREL_LO16;
- case Mips::fixup_MIPS_PC19_S2:
- return ELF::R_MIPS_PC19_S2;
- case Mips::fixup_MIPS_PC18_S3:
- return ELF::R_MIPS_PC18_S3;
- case Mips::fixup_MIPS_PC21_S2:
- return ELF::R_MIPS_PC21_S2;
- case Mips::fixup_MIPS_PC26_S2:
- return ELF::R_MIPS_PC26_S2;
- case Mips::fixup_MIPS_PCHI16:
- return ELF::R_MIPS_PCHI16;
- case Mips::fixup_MIPS_PCLO16:
- return ELF::R_MIPS_PCLO16;
}
+
llvm_unreachable("invalid fixup kind!");
}
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h b/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
index 687b800c2409..81a0a987bc4e 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
@@ -25,6 +25,6 @@ bool baseRegNeedsLoadStoreMask(unsigned Reg);
MCELFStreamer *createMipsNaClELFStreamer(MCContext &Context, MCAsmBackend &TAB,
raw_pwrite_stream &OS,
MCCodeEmitter *Emitter, bool RelaxAll);
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index 54d88632abdb..9bdf8235a2b4 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -43,11 +43,9 @@ using namespace llvm;
/// Select the Mips CPU for the given triple and cpu name.
/// FIXME: Merge with the copy in MipsSubtarget.cpp
-StringRef MIPS_MC::selectMipsCPU(StringRef TT, StringRef CPU) {
+StringRef MIPS_MC::selectMipsCPU(const Triple &TT, StringRef CPU) {
if (CPU.empty() || CPU == "generic") {
- Triple TheTriple(TT);
- if (TheTriple.getArch() == Triple::mips ||
- TheTriple.getArch() == Triple::mipsel)
+ if (TT.getArch() == Triple::mips || TT.getArch() == Triple::mipsel)
CPU = "mips32";
else
CPU = "mips64";
@@ -67,8 +65,8 @@ static MCRegisterInfo *createMipsMCRegisterInfo(StringRef TT) {
return X;
}
-static MCSubtargetInfo *createMipsMCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
+static MCSubtargetInfo *createMipsMCSubtargetInfo(const Triple &TT,
+ StringRef CPU, StringRef FS) {
CPU = MIPS_MC::selectMipsCPU(TT, CPU);
MCSubtargetInfo *X = new MCSubtargetInfo();
InitMipsMCSubtargetInfo(X, TT, CPU, FS);
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
index 577a8b3ea3bb..20358a0f9cf2 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h
@@ -26,6 +26,7 @@ class MCRegisterInfo;
class MCSubtargetInfo;
class StringRef;
class Target;
+class Triple;
class raw_ostream;
class raw_pwrite_stream;
@@ -42,26 +43,26 @@ MCCodeEmitter *createMipsMCCodeEmitterEL(const MCInstrInfo &MCII,
MCContext &Ctx);
MCAsmBackend *createMipsAsmBackendEB32(const Target &T,
- const MCRegisterInfo &MRI, StringRef TT,
- StringRef CPU);
+ const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU);
MCAsmBackend *createMipsAsmBackendEL32(const Target &T,
- const MCRegisterInfo &MRI, StringRef TT,
- StringRef CPU);
+ const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU);
MCAsmBackend *createMipsAsmBackendEB64(const Target &T,
- const MCRegisterInfo &MRI, StringRef TT,
- StringRef CPU);
+ const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU);
MCAsmBackend *createMipsAsmBackendEL64(const Target &T,
- const MCRegisterInfo &MRI, StringRef TT,
- StringRef CPU);
+ const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU);
MCObjectWriter *createMipsELFObjectWriter(raw_pwrite_stream &OS, uint8_t OSABI,
bool IsLittleEndian, bool Is64Bit);
namespace MIPS_MC {
-StringRef selectMipsCPU(StringRef TT, StringRef CPU);
+StringRef selectMipsCPU(const Triple &TT, StringRef CPU);
}
-} // End llvm namespace
+} // namespace llvm
// Defines symbolic names for Mips registers. This defines a mapping from
// register name to register number.
diff --git a/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp
index aef9bd3a8e2a..537867503eda 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp
@@ -265,4 +265,4 @@ MCELFStreamer *createMipsNaClELFStreamer(MCContext &Context, MCAsmBackend &TAB,
return S;
}
-}
+} // namespace llvm
diff --git a/lib/Target/Mips/MicroMips32r6InstrFormats.td b/lib/Target/Mips/MicroMips32r6InstrFormats.td
index 7350b97731ba..78ba76d27cbb 100644
--- a/lib/Target/Mips/MicroMips32r6InstrFormats.td
+++ b/lib/Target/Mips/MicroMips32r6InstrFormats.td
@@ -221,3 +221,22 @@ class CMP_BRANCH_1R_BOTH_OFF16_FM_MMR6<bits<6> funct> : MipsR6Inst {
let Inst{20-16} = rt;
let Inst{15-0} = offset;
}
+
+class ERET_FM_MMR6<string instr_asm> : MMR6Arch<instr_asm> {
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-16} = 0x00;
+ let Inst{15-6} = 0x3cd;
+ let Inst{5-0} = 0x3c;
+}
+
+class ERETNC_FM_MMR6<string instr_asm> : MMR6Arch<instr_asm> {
+ bits<32> Inst;
+
+ let Inst{31-26} = 0x00;
+ let Inst{25-17} = 0x00;
+ let Inst{16-16} = 0x01;
+ let Inst{15-6} = 0x3cd;
+ let Inst{5-0} = 0x3c;
+}
diff --git a/lib/Target/Mips/MicroMips32r6InstrInfo.td b/lib/Target/Mips/MicroMips32r6InstrInfo.td
index 2259d5d77904..ed71c3d9b5f6 100644
--- a/lib/Target/Mips/MicroMips32r6InstrInfo.td
+++ b/lib/Target/Mips/MicroMips32r6InstrInfo.td
@@ -40,6 +40,8 @@ class CLO_MMR6_ENC : POOL32A_2R_FM_MMR6<0b0100101100>;
class CLZ_MMR6_ENC : SPECIAL_2R_FM_MMR6<0b010000>;
class DIV_MMR6_ENC : ARITH_FM_MMR6<"div", 0x118>;
class DIVU_MMR6_ENC : ARITH_FM_MMR6<"divu", 0x198>;
+class ERET_MMR6_ENC : ERET_FM_MMR6<"eret">;
+class ERETNC_MMR6_ENC : ERETNC_FM_MMR6<"eretnc">;
class JIALC_MMR6_ENC : JMP_IDX_COMPACT_FM<0b100000>;
class JIC_MMR6_ENC : JMP_IDX_COMPACT_FM<0b101000>;
class LSA_MMR6_ENC : POOL32A_LSA_FM<0b001111>;
@@ -164,6 +166,9 @@ class CLO_CLZ_MMR6_DESC_BASE<string instr_asm, RegisterOperand GPROpnd>
class CLO_MMR6_DESC : CLO_CLZ_MMR6_DESC_BASE<"clo", GPR32Opnd>;
class CLZ_MMR6_DESC : CLO_CLZ_MMR6_DESC_BASE<"clz", GPR32Opnd>;
+class ERET_MMR6_DESC : ER_FT<"eret">;
+class ERETNC_MMR6_DESC : ER_FT<"eretnc">;
+
class JMP_MMR6_IDX_COMPACT_DESC_BASE<string opstr, DAGOperand opnd,
RegisterOperand GPROpnd>
: MMR6Arch<opstr> {
@@ -302,6 +307,9 @@ def CLO_MMR6 : R6MMR6Rel, CLO_MMR6_ENC, CLO_MMR6_DESC, ISA_MICROMIPS32R6;
def CLZ_MMR6 : R6MMR6Rel, CLZ_MMR6_ENC, CLZ_MMR6_DESC, ISA_MICROMIPS32R6;
def DIV_MMR6 : R6MMR6Rel, DIV_MMR6_DESC, DIV_MMR6_ENC, ISA_MICROMIPS32R6;
def DIVU_MMR6 : R6MMR6Rel, DIVU_MMR6_DESC, DIVU_MMR6_ENC, ISA_MICROMIPS32R6;
+def ERET_MMR6 : R6MMR6Rel, ERET_MMR6_DESC, ERET_MMR6_ENC, ISA_MICROMIPS32R6;
+def ERETNC_MMR6 : R6MMR6Rel, ERETNC_MMR6_DESC, ERETNC_MMR6_ENC,
+ ISA_MICROMIPS32R6;
def JIALC_MMR6 : R6MMR6Rel, JIALC_MMR6_ENC, JIALC_MMR6_DESC, ISA_MICROMIPS32R6;
def JIC_MMR6 : R6MMR6Rel, JIC_MMR6_ENC, JIC_MMR6_DESC, ISA_MICROMIPS32R6;
def LSA_MMR6 : R6MMR6Rel, LSA_MMR6_ENC, LSA_MMR6_DESC, ISA_MICROMIPS32R6;
diff --git a/lib/Target/Mips/Mips.h b/lib/Target/Mips/Mips.h
index 671d7a87cc3d..604b6704c033 100644
--- a/lib/Target/Mips/Mips.h
+++ b/lib/Target/Mips/Mips.h
@@ -31,6 +31,6 @@ namespace llvm {
FunctionPass *createMipsDelaySlotFillerPass(MipsTargetMachine &TM);
FunctionPass *createMipsLongBranchPass(MipsTargetMachine &TM);
FunctionPass *createMipsConstantIslandPass(MipsTargetMachine &tm);
-} // end namespace llvm;
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/Mips16FrameLowering.h b/lib/Target/Mips/Mips16FrameLowering.h
index f281c927c1c4..2c33cfb96530 100644
--- a/lib/Target/Mips/Mips16FrameLowering.h
+++ b/lib/Target/Mips/Mips16FrameLowering.h
@@ -42,6 +42,6 @@ public:
RegScavenger *RS) const override;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp
index 893fc7cdf473..f2831fd5d0f6 100644
--- a/lib/Target/Mips/Mips16HardFloat.cpp
+++ b/lib/Target/Mips/Mips16HardFloat.cpp
@@ -62,7 +62,7 @@ namespace {
};
char Mips16HardFloat::ID = 0;
-}
+} // namespace
//
// Return types that matter for hard float are:
diff --git a/lib/Target/Mips/Mips16HardFloatInfo.cpp b/lib/Target/Mips/Mips16HardFloatInfo.cpp
index 2eb6e5ddd2d9..bf82108728de 100644
--- a/lib/Target/Mips/Mips16HardFloatInfo.cpp
+++ b/lib/Target/Mips/Mips16HardFloatInfo.cpp
@@ -46,5 +46,5 @@ extern FuncSignature const *findFuncSignature(const char *name) {
}
return nullptr;
}
-}
-}
+} // namespace Mips16HardFloatInfo
+} // namespace llvm
diff --git a/lib/Target/Mips/Mips16HardFloatInfo.h b/lib/Target/Mips/Mips16HardFloatInfo.h
index 7295c287576d..8354c33d33bc 100644
--- a/lib/Target/Mips/Mips16HardFloatInfo.h
+++ b/lib/Target/Mips/Mips16HardFloatInfo.h
@@ -44,7 +44,7 @@ struct FuncNameSignature {
extern const FuncNameSignature PredefinedFuncs[];
extern FuncSignature const *findFuncSignature(const char *name);
-}
-}
+} // namespace Mips16HardFloatInfo
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/Mips16ISelDAGToDAG.h b/lib/Target/Mips/Mips16ISelDAGToDAG.h
index ae0e61e19d9d..ce6b3f8486a9 100644
--- a/lib/Target/Mips/Mips16ISelDAGToDAG.h
+++ b/lib/Target/Mips/Mips16ISelDAGToDAG.h
@@ -48,6 +48,6 @@ private:
FunctionPass *createMips16ISelDag(MipsTargetMachine &TM);
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp
index 846e3c964f44..c52ef2a4e195 100644
--- a/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -54,7 +54,7 @@ struct Mips16IntrinsicHelperType{
return std::strcmp(Name, RHS.Name) == 0;
}
};
-}
+} // namespace
// Libcalls for which no helper is generated. Sorted by name for binary search.
static const Mips16Libcall HardFloatLibCalls[] = {
diff --git a/lib/Target/Mips/Mips16ISelLowering.h b/lib/Target/Mips/Mips16ISelLowering.h
index d3b9f750f347..99d3cacca67a 100644
--- a/lib/Target/Mips/Mips16ISelLowering.h
+++ b/lib/Target/Mips/Mips16ISelLowering.h
@@ -77,6 +77,6 @@ namespace llvm {
unsigned SltiOpc, unsigned SltiXOpc,
MachineInstr *MI, MachineBasicBlock *BB )const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/Mips16InstrInfo.h b/lib/Target/Mips/Mips16InstrInfo.h
index 6540b40bc9ab..1132d8a0318d 100644
--- a/lib/Target/Mips/Mips16InstrInfo.h
+++ b/lib/Target/Mips/Mips16InstrInfo.h
@@ -123,6 +123,6 @@ private:
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td
index 8a27874a37ce..83781ff24ac5 100644
--- a/lib/Target/Mips/Mips64InstrInfo.td
+++ b/lib/Target/Mips/Mips64InstrInfo.td
@@ -27,8 +27,6 @@ def uimm16_64 : Operand<i64> {
// Signed Operand
def simm10_64 : Operand<i64>;
-def imm64: Operand<i64>;
-
// Transformation Function - get Imm - 32.
def Subtract32 : SDNodeXForm<imm, [{
return getImm(N, (unsigned)N->getZExtValue() - 32);
diff --git a/lib/Target/Mips/MipsAnalyzeImmediate.h b/lib/Target/Mips/MipsAnalyzeImmediate.h
index ae3c38ced80b..6b5d02b7a7e0 100644
--- a/lib/Target/Mips/MipsAnalyzeImmediate.h
+++ b/lib/Target/Mips/MipsAnalyzeImmediate.h
@@ -58,6 +58,6 @@ namespace llvm {
unsigned ADDiu, ORi, SLL, LUi;
InstSeq Insts;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index f84666b6229e..1c80021086bd 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -694,9 +694,8 @@ void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
// clean anyhow.
// FIXME: For ifunc related functions we could iterate over and look
// for a feature string that doesn't match the default one.
- StringRef TT = TM.getTargetTriple();
- StringRef CPU =
- MIPS_MC::selectMipsCPU(TM.getTargetTriple(), TM.getTargetCPU());
+ const Triple &TT = TM.getTargetTriple();
+ StringRef CPU = MIPS_MC::selectMipsCPU(TT, TM.getTargetCPU());
StringRef FS = TM.getTargetFeatureString();
const MipsTargetMachine &MTM = static_cast<const MipsTargetMachine &>(TM);
const MipsSubtarget STI(TT, CPU, FS, MTM.isLittleEndian(), MTM);
@@ -900,7 +899,8 @@ void MipsAsmPrinter::EmitFPCallStub(
// freed) and since we're at the global level we can use the default
// constructed subtarget.
std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo(
- TM.getTargetTriple(), TM.getTargetCPU(), TM.getTargetFeatureString()));
+ TM.getTargetTriple().str(), TM.getTargetCPU(),
+ TM.getTargetFeatureString()));
//
// .global xxxx
diff --git a/lib/Target/Mips/MipsAsmPrinter.h b/lib/Target/Mips/MipsAsmPrinter.h
index a7f3304a3da8..3c2b843b8963 100644
--- a/lib/Target/Mips/MipsAsmPrinter.h
+++ b/lib/Target/Mips/MipsAsmPrinter.h
@@ -145,7 +145,7 @@ public:
void EmitEndOfAsmFile(Module &M) override;
void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsCCState.h b/lib/Target/Mips/MipsCCState.h
index 081c393a09be..04a9ef5ef051 100644
--- a/lib/Target/Mips/MipsCCState.h
+++ b/lib/Target/Mips/MipsCCState.h
@@ -131,6 +131,6 @@ public:
bool IsCallOperandFixed(unsigned ValNo) { return CallOperandIsFixed[ValNo]; }
SpecialCallingConvType getSpecialCallingConv() { return SpecialCallingConv; }
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsFrameLowering.h b/lib/Target/Mips/MipsFrameLowering.h
index 5eabd58e8686..dab9c055df6f 100644
--- a/lib/Target/Mips/MipsFrameLowering.h
+++ b/lib/Target/Mips/MipsFrameLowering.h
@@ -49,6 +49,6 @@ protected:
const MipsFrameLowering *createMips16FrameLowering(const MipsSubtarget &ST);
const MipsFrameLowering *createMipsSEFrameLowering(const MipsSubtarget &ST);
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.h b/lib/Target/Mips/MipsISelDAGToDAG.h
index 1426d0fbf516..83be74f0d466 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsISelDAGToDAG.h
@@ -129,6 +129,6 @@ private:
unsigned ConstraintID,
std::vector<SDValue> &OutOps) override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index bc9a1ce64097..e4f3cde0c804 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -204,7 +204,7 @@ namespace llvm {
SDL,
SDR
};
- }
+ } // namespace MipsISD
//===--------------------------------------------------------------------===//
// TargetLowering Implementation
@@ -566,6 +566,6 @@ namespace llvm {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
}
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp
index 0839147984b5..bb23cc04e696 100644
--- a/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/lib/Target/Mips/MipsInstrInfo.cpp
@@ -96,8 +96,7 @@ bool MipsInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
void
MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- DebugLoc DL,
- const SmallVectorImpl<MachineOperand> &Cond) const {
+ DebugLoc DL, ArrayRef<MachineOperand> Cond) const {
unsigned Opc = Cond[0].getImm();
const MCInstrDesc &MCID = get(Opc);
MachineInstrBuilder MIB = BuildMI(&MBB, DL, MCID);
@@ -115,7 +114,7 @@ MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
unsigned MipsInstrInfo::InsertBranch(
MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
+ ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h
index 45895355e1a5..3daff5fa5d36 100644
--- a/lib/Target/Mips/MipsInstrInfo.h
+++ b/lib/Target/Mips/MipsInstrInfo.h
@@ -59,8 +59,7 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
bool
@@ -140,13 +139,13 @@ private:
SmallVectorImpl<MachineOperand> &Cond) const;
void BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB, DebugLoc DL,
- const SmallVectorImpl<MachineOperand>& Cond) const;
+ ArrayRef<MachineOperand> Cond) const;
};
/// Create MipsInstrInfo objects.
const MipsInstrInfo *createMips16InstrInfo(const MipsSubtarget &STI);
const MipsInstrInfo *createMipsSEInstrInfo(const MipsSubtarget &STI);
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index 58791cf2734a..2a7949eb15eb 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -358,6 +358,8 @@ def calltarget : Operand<iPTR> {
let ParserMatchClass = MipsJumpTargetAsmOperand;
}
+def imm64: Operand<i64>;
+
def simm9 : Operand<i32>;
def simm10 : Operand<i32>;
def simm11 : Operand<i32>;
@@ -384,7 +386,15 @@ def simm20 : Operand<i32> {
def uimm20 : Operand<i32> {
}
+def MipsUImm10AsmOperand : AsmOperandClass {
+ let Name = "UImm10";
+ let RenderMethod = "addImmOperands";
+ let ParserMethod = "parseImm";
+ let PredicateMethod = "isUImm<10>";
+}
+
def uimm10 : Operand<i32> {
+ let ParserMatchClass = MipsUImm10AsmOperand;
}
def simm16_64 : Operand<i64> {
@@ -1273,7 +1283,9 @@ def SYSCALL : MMRel, SYS_FT<"syscall">, SYS_FM<0xc>;
def TRAP : TrapBase<BREAK>;
def SDBBP : MMRel, SYS_FT<"sdbbp">, SDBBP_FM, ISA_MIPS32_NOT_32R6_64R6;
+let AdditionalPredicates = [NotInMicroMips] in {
def ERET : MMRel, ER_FT<"eret">, ER_FM<0x18>, INSN_MIPS3_32;
+}
def DERET : MMRel, ER_FT<"deret">, ER_FM<0x1f>, ISA_MIPS32;
def EI : MMRel, DEI_FT<"ei", GPR32Opnd>, EI_FM<1>, ISA_MIPS32R2;
@@ -1672,6 +1684,29 @@ def JalTwoReg : MipsAsmPseudoInst<(outs GPR32Opnd:$rd), (ins GPR32Opnd:$rs),
def JalOneReg : MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs),
"jal\t$rs"> ;
+let hasDelaySlot = 1 in {
+def BneImm : MipsAsmPseudoInst<(outs GPR32Opnd:$rt),
+ (ins imm64:$imm64, brtarget:$offset),
+ "bne\t$rt, $imm64, $offset">;
+def BeqImm : MipsAsmPseudoInst<(outs GPR32Opnd:$rt),
+ (ins imm64:$imm64, brtarget:$offset),
+ "beq\t$rt, $imm64, $offset">;
+
+class CondBranchPseudo<string instr_asm> :
+ MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs, GPR32Opnd:$rt,
+ brtarget:$offset),
+ !strconcat(instr_asm, "\t$rs, $rt, $offset")>;
+}
+
+def BLT : CondBranchPseudo<"blt">;
+def BLE : CondBranchPseudo<"ble">;
+def BGE : CondBranchPseudo<"bge">;
+def BGT : CondBranchPseudo<"bgt">;
+def BLTU : CondBranchPseudo<"bltu">;
+def BLEU : CondBranchPseudo<"bleu">;
+def BGEU : CondBranchPseudo<"bgeu">;
+def BGTU : CondBranchPseudo<"bgtu">;
+
//===----------------------------------------------------------------------===//
// Arbitrary patterns that map to one or more instructions
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/Mips/MipsMCInstLower.h b/lib/Target/Mips/MipsMCInstLower.h
index 1ce27e401850..a8bd1cd78d1d 100644
--- a/lib/Target/Mips/MipsMCInstLower.h
+++ b/lib/Target/Mips/MipsMCInstLower.h
@@ -45,6 +45,6 @@ private:
MCSymbolRefExpr::VariantKind Kind) const;
bool lowerLongBranch(const MachineInstr *MI, MCInst &OutMI) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp b/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp
index b18a673912f8..8568137ff374 100644
--- a/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsModuleISelDAGToDAG.cpp
@@ -37,7 +37,7 @@ namespace {
};
char MipsModuleDAGToDAGISel::ID = 0;
-}
+} // namespace
bool MipsModuleDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
DEBUG(errs() << "In MipsModuleDAGToDAGISel::runMachineFunction\n");
diff --git a/lib/Target/Mips/MipsOs16.cpp b/lib/Target/Mips/MipsOs16.cpp
index b6cd79193cfc..5c71272e99be 100644
--- a/lib/Target/Mips/MipsOs16.cpp
+++ b/lib/Target/Mips/MipsOs16.cpp
@@ -43,7 +43,7 @@ namespace {
};
char MipsOs16::ID = 0;
-}
+} // namespace
// Figure out if we need float point based on the function signature.
// We need to move variables in and/or out of floating point
diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp
index ec7bf314c641..a858f30b94a8 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -75,7 +75,7 @@ private:
const MipsSEInstrInfo &TII;
const MipsRegisterInfo &RegInfo;
};
-}
+} // namespace
ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
: MF(MF_), MRI(MF.getRegInfo()),
diff --git a/lib/Target/Mips/MipsSEFrameLowering.h b/lib/Target/Mips/MipsSEFrameLowering.h
index 2fcd6bbb9a15..ee56b8b8c8ff 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.h
+++ b/lib/Target/Mips/MipsSEFrameLowering.h
@@ -39,6 +39,6 @@ public:
unsigned ehDataReg(unsigned I) const;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.h b/lib/Target/Mips/MipsSEISelDAGToDAG.h
index a894034020e9..fb2f04121556 100644
--- a/lib/Target/Mips/MipsSEISelDAGToDAG.h
+++ b/lib/Target/Mips/MipsSEISelDAGToDAG.h
@@ -126,6 +126,6 @@ private:
FunctionPass *createMipsSEISelDag(MipsTargetMachine &TM);
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsSEISelLowering.h b/lib/Target/Mips/MipsSEISelLowering.h
index d44f8d82ec3e..623630a18078 100644
--- a/lib/Target/Mips/MipsSEISelLowering.h
+++ b/lib/Target/Mips/MipsSEISelLowering.h
@@ -112,6 +112,6 @@ namespace llvm {
MachineBasicBlock *emitFEXP2_D_1(MachineInstr *MI,
MachineBasicBlock *BB) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsSEInstrInfo.h b/lib/Target/Mips/MipsSEInstrInfo.h
index bebbabf7b838..cdafe9f4d48b 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.h
+++ b/lib/Target/Mips/MipsSEInstrInfo.h
@@ -113,6 +113,6 @@ private:
MachineBasicBlock::iterator I) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsSelectionDAGInfo.h b/lib/Target/Mips/MipsSelectionDAGInfo.h
index 061423fbeb86..feddf9808264 100644
--- a/lib/Target/Mips/MipsSelectionDAGInfo.h
+++ b/lib/Target/Mips/MipsSelectionDAGInfo.h
@@ -26,6 +26,6 @@ public:
~MipsSelectionDAGInfo();
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsSubtarget.cpp b/lib/Target/Mips/MipsSubtarget.cpp
index 7ea10eb954f3..c41bb16a58ea 100644
--- a/lib/Target/Mips/MipsSubtarget.cpp
+++ b/lib/Target/Mips/MipsSubtarget.cpp
@@ -59,7 +59,7 @@ static cl::opt<bool>
void MipsSubtarget::anchor() { }
-MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
+MipsSubtarget::MipsSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, bool little,
const MipsTargetMachine &TM)
: MipsGenSubtargetInfo(TT, CPU, FS), MipsArchVersion(MipsDefault),
@@ -126,7 +126,7 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
}
/// This overrides the PostRAScheduler bit in the SchedModel for any CPU.
-bool MipsSubtarget::enablePostMachineScheduler() const { return true; }
+bool MipsSubtarget::enablePostRAScheduler() const { return true; }
void MipsSubtarget::getCriticalPathRCs(RegClassVector &CriticalPathRCs) const {
CriticalPathRCs.clear();
diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h
index 0bfafc8b47a6..c8a2e4bd72c5 100644
--- a/lib/Target/Mips/MipsSubtarget.h
+++ b/lib/Target/Mips/MipsSubtarget.h
@@ -147,7 +147,7 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
public:
/// This overrides the PostRAScheduler bit in the SchedModel for each CPU.
- bool enablePostMachineScheduler() const override;
+ bool enablePostRAScheduler() const override;
void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const override;
CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const override;
@@ -161,9 +161,8 @@ public:
/// This constructor initializes the data members to match that
/// of the specified triple.
- MipsSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, bool little,
- const MipsTargetMachine &TM);
+ MipsSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
+ bool little, const MipsTargetMachine &TM);
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
@@ -293,6 +292,6 @@ public:
return &InstrItins;
}
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index b279184ea304..c820668befa0 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -44,12 +44,11 @@ extern "C" void LLVMInitializeMipsTarget() {
RegisterTargetMachine<MipselTargetMachine> B(TheMips64elTarget);
}
-static std::string computeDataLayout(StringRef TT, StringRef CPU,
+static std::string computeDataLayout(const Triple &TT, StringRef CPU,
const TargetOptions &Options,
bool isLittle) {
std::string Ret = "";
- MipsABIInfo ABI =
- MipsABIInfo::computeTargetABI(Triple(TT), CPU, Options.MCOptions);
+ MipsABIInfo ABI = MipsABIInfo::computeTargetABI(TT, CPU, Options.MCOptions);
// There are both little and big endian mips.
if (isLittle)
@@ -83,7 +82,7 @@ static std::string computeDataLayout(StringRef TT, StringRef CPU,
// offset from the stack/frame pointer, using StackGrowsUp enables
// an easier handling.
// Using CodeModel::Large enables different CALL behavior.
-MipsTargetMachine::MipsTargetMachine(const Target &T, StringRef TT,
+MipsTargetMachine::MipsTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
@@ -91,7 +90,7 @@ MipsTargetMachine::MipsTargetMachine(const Target &T, StringRef TT,
: LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
CPU, FS, Options, RM, CM, OL),
isLittle(isLittle), TLOF(make_unique<MipsTargetObjectFile>()),
- ABI(MipsABIInfo::computeTargetABI(Triple(TT), CPU, Options.MCOptions)),
+ ABI(MipsABIInfo::computeTargetABI(TT, CPU, Options.MCOptions)),
Subtarget(nullptr), DefaultSubtarget(TT, CPU, FS, isLittle, *this),
NoMips16Subtarget(TT, CPU, FS.empty() ? "-mips16" : FS.str() + ",-mips16",
isLittle, *this),
@@ -105,21 +104,21 @@ MipsTargetMachine::~MipsTargetMachine() {}
void MipsebTargetMachine::anchor() { }
-MipsebTargetMachine::
-MipsebTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
+MipsebTargetMachine::MipsebTargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
void MipselTargetMachine::anchor() { }
-MipselTargetMachine::
-MipselTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
- : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
+MipselTargetMachine::MipselTargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
const MipsSubtarget *
MipsTargetMachine::getSubtargetImpl(const Function &F) const {
@@ -157,7 +156,8 @@ MipsTargetMachine::getSubtargetImpl(const Function &F) const {
// creation will depend on the TM and the code generation flags on the
// function that reside in TargetOptions.
resetTargetOptions(F);
- I = llvm::make_unique<MipsSubtarget>(TargetTriple, CPU, FS, isLittle, *this);
+ I = llvm::make_unique<MipsSubtarget>(TargetTriple, CPU, FS, isLittle,
+ *this);
}
return I.get();
}
diff --git a/lib/Target/Mips/MipsTargetMachine.h b/lib/Target/Mips/MipsTargetMachine.h
index 5427d6a8304b..976970ccbcc6 100644
--- a/lib/Target/Mips/MipsTargetMachine.h
+++ b/lib/Target/Mips/MipsTargetMachine.h
@@ -39,8 +39,8 @@ class MipsTargetMachine : public LLVMTargetMachine {
mutable StringMap<std::unique_ptr<MipsSubtarget>> SubtargetMap;
public:
- MipsTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM,
+ MipsTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options, Reloc::Model RM,
CodeModel::Model CM, CodeGenOpt::Level OL, bool isLittle);
~MipsTargetMachine() override;
@@ -73,8 +73,8 @@ public:
class MipsebTargetMachine : public MipsTargetMachine {
virtual void anchor();
public:
- MipsebTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
+ MipsebTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
@@ -84,12 +84,12 @@ public:
class MipselTargetMachine : public MipsTargetMachine {
virtual void anchor();
public:
- MipselTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
+ MipselTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Mips/MipsTargetStreamer.h b/lib/Target/Mips/MipsTargetStreamer.h
index fed06005e9c8..39cadc1e0f83 100644
--- a/lib/Target/Mips/MipsTargetStreamer.h
+++ b/lib/Target/Mips/MipsTargetStreamer.h
@@ -248,5 +248,5 @@ public:
void emitDirectiveModuleOddSPReg(bool Enabled, bool IsO32ABI) override;
void emitMipsAbiFlags();
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/NVPTX/CMakeLists.txt b/lib/Target/NVPTX/CMakeLists.txt
index d48a7a9b1fcc..99e950eba80f 100644
--- a/lib/Target/NVPTX/CMakeLists.txt
+++ b/lib/Target/NVPTX/CMakeLists.txt
@@ -21,6 +21,7 @@ set(NVPTXCodeGen_sources
NVPTXInstrInfo.cpp
NVPTXLowerAggrCopies.cpp
NVPTXLowerKernelArgs.cpp
+ NVPTXLowerAlloca.cpp
NVPTXMCExpr.cpp
NVPTXPrologEpilogPass.cpp
NVPTXRegisterInfo.cpp
diff --git a/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h b/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h
index 02c5a210d099..8144f3fde730 100644
--- a/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h
+++ b/lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.h
@@ -49,6 +49,6 @@ public:
raw_ostream &O, const char *Modifier = nullptr);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h b/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
index a72ae2ef53a7..b55664ed32a7 100644
--- a/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
+++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXBaseInfo.h
@@ -94,7 +94,7 @@ enum {
IsSurfTexQueryFlag = 0x800,
IsTexModeUnifiedFlag = 0x1000
};
-}
-}
+} // namespace NVPTXII
+} // namespace llvm
#endif
diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
index d50010508eaa..8a28b089ce35 100644
--- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
+++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCTargetDesc.cpp
@@ -45,7 +45,7 @@ static MCRegisterInfo *createNVPTXMCRegisterInfo(StringRef TT) {
}
static MCSubtargetInfo *
-createNVPTXMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS) {
+createNVPTXMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
MCSubtargetInfo *X = new MCSubtargetInfo();
InitNVPTXMCSubtargetInfo(X, TT, CPU, FS);
return X;
diff --git a/lib/Target/NVPTX/ManagedStringPool.h b/lib/Target/NVPTX/ManagedStringPool.h
index a2d670f8d39d..1480b61afdbe 100644
--- a/lib/Target/NVPTX/ManagedStringPool.h
+++ b/lib/Target/NVPTX/ManagedStringPool.h
@@ -43,6 +43,6 @@ public:
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/NVPTX/NVPTX.h b/lib/Target/NVPTX/NVPTX.h
index 477b0bac6ca8..d06d61f5e550 100644
--- a/lib/Target/NVPTX/NVPTX.h
+++ b/lib/Target/NVPTX/NVPTX.h
@@ -70,6 +70,7 @@ MachineFunctionPass *createNVPTXPrologEpilogPass();
MachineFunctionPass *createNVPTXReplaceImageHandlesPass();
FunctionPass *createNVPTXImageOptimizerPass();
FunctionPass *createNVPTXLowerKernelArgsPass(const NVPTXTargetMachine *TM);
+BasicBlockPass *createNVPTXLowerAllocaPass();
bool isImageOrSamplerVal(const Value *, const Module *);
@@ -132,7 +133,7 @@ enum VecType {
V2 = 2,
V4 = 4
};
-}
+} // namespace PTXLdStInstCode
/// PTXCvtMode - Conversion code enumeration
namespace PTXCvtMode {
@@ -151,7 +152,7 @@ enum CvtMode {
FTZ_FLAG = 0x10,
SAT_FLAG = 0x20
};
-}
+} // namespace PTXCvtMode
/// PTXCmpMode - Comparison mode enumeration
namespace PTXCmpMode {
@@ -179,9 +180,9 @@ enum CmpMode {
BASE_MASK = 0xFF,
FTZ_FLAG = 0x100
};
-}
-}
-} // end namespace llvm;
+} // namespace PTXCmpMode
+} // namespace NVPTX
+} // namespace llvm
// Defines symbolic names for NVPTX registers. This defines a mapping from
// register name to register number.
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 298b992b241f..1a1a8ca7c666 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -109,7 +109,7 @@ void VisitGlobalVariableForEmission(
Visited.insert(GV);
Visiting.erase(GV);
}
-}
+} // namespace
void NVPTXAsmPrinter::emitLineNumberAsDotLoc(const MachineInstr &MI) {
if (!EmitLineNumbers)
@@ -808,7 +808,7 @@ bool NVPTXAsmPrinter::doInitialization(Module &M) {
// Construct a default subtarget off of the TargetMachine defaults. The
// rest of NVPTX isn't friendly to change subtargets per function and
// so the default TargetMachine will have all of the options.
- StringRef TT = TM.getTargetTriple();
+ const Triple &TT = TM.getTargetTriple();
StringRef CPU = TM.getTargetCPU();
StringRef FS = TM.getTargetFeatureString();
const NVPTXTargetMachine &NTM = static_cast<const NVPTXTargetMachine &>(TM);
@@ -818,7 +818,6 @@ bool NVPTXAsmPrinter::doInitialization(Module &M) {
raw_svector_ostream OS1(Str1);
MMI = getAnalysisIfAvailable<MachineModuleInfo>();
- MMI->AnalyzeModule(M);
// We need to call the parent's one explicitly.
//bool Result = AsmPrinter::doInitialization(M);
@@ -847,7 +846,7 @@ bool NVPTXAsmPrinter::doInitialization(Module &M) {
}
// If we're not NVCL we're CUDA, go ahead and emit filenames.
- if (Triple(TM.getTargetTriple()).getOS() != Triple::NVCL)
+ if (TM.getTargetTriple().getOS() != Triple::NVCL)
recordAndEmitFilenames(M);
GlobalsEmitted = false;
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.h b/lib/Target/NVPTX/NVPTXAsmPrinter.h
index f6f7685e76f9..12d80a34a4e8 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.h
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.h
@@ -349,6 +349,6 @@ public:
DebugLoc prevDebugLoc;
void emitLineNumberAsDotLoc(const MachineInstr &);
};
-} // end of namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp b/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp
index 7d4be8e809cf..2d5e74c4c4bf 100644
--- a/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp
+++ b/lib/Target/NVPTX/NVPTXAssignValidGlobalNames.cpp
@@ -38,7 +38,7 @@ public:
/// \brief Clean up the name to remove symbols invalid in PTX.
std::string cleanUpName(StringRef Name);
};
-}
+} // namespace
char NVPTXAssignValidGlobalNames::ID = 0;
diff --git a/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp b/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
index cfff0019b8d9..3eb7024ff08a 100644
--- a/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
+++ b/lib/Target/NVPTX/NVPTXFavorNonGenericAddrSpaces.cpp
@@ -98,17 +98,16 @@ private:
/// This reordering exposes to optimizeMemoryInstruction more
/// optimization opportunities on loads and stores.
///
- /// Returns true if this function succesfully hoists an eliminable
- /// addrspacecast or V is already such an addrspacecast.
- /// Transforms "gep (addrspacecast X), indices" into "addrspacecast (gep X,
- /// indices)".
- bool hoistAddrSpaceCastFrom(Value *V, int Depth = 0);
+ /// If this function successfully hoists an eliminable addrspacecast or V is
+ /// already such an addrspacecast, it returns the transformed value (which is
+ /// guaranteed to be an addrspacecast); otherwise, it returns nullptr.
+ Value *hoistAddrSpaceCastFrom(Value *V, int Depth = 0);
/// Helper function for GEPs.
- bool hoistAddrSpaceCastFromGEP(GEPOperator *GEP, int Depth);
+ Value *hoistAddrSpaceCastFromGEP(GEPOperator *GEP, int Depth);
/// Helper function for bitcasts.
- bool hoistAddrSpaceCastFromBitCast(BitCastOperator *BC, int Depth);
+ Value *hoistAddrSpaceCastFromBitCast(BitCastOperator *BC, int Depth);
};
-}
+} // namespace
char NVPTXFavorNonGenericAddrSpaces::ID = 0;
@@ -143,17 +142,19 @@ static bool isEliminableAddrSpaceCast(Value *V) {
DestTy->getAddressSpace() == AddressSpace::ADDRESS_SPACE_GENERIC);
}
-bool NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFromGEP(GEPOperator *GEP,
- int Depth) {
- if (!hoistAddrSpaceCastFrom(GEP->getPointerOperand(), Depth + 1))
- return false;
+Value *NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFromGEP(
+ GEPOperator *GEP, int Depth) {
+ Value *NewOperand =
+ hoistAddrSpaceCastFrom(GEP->getPointerOperand(), Depth + 1);
+ if (NewOperand == nullptr)
+ return nullptr;
- // That hoistAddrSpaceCastFrom succeeds implies GEP's pointer operand is now
- // an eliminable addrspacecast.
- assert(isEliminableAddrSpaceCast(GEP->getPointerOperand()));
- Operator *Cast = cast<Operator>(GEP->getPointerOperand());
+ // hoistAddrSpaceCastFrom returns an eliminable addrspacecast or nullptr.
+ assert(isEliminableAddrSpaceCast(NewOperand));
+ Operator *Cast = cast<Operator>(NewOperand);
SmallVector<Value *, 8> Indices(GEP->idx_begin(), GEP->idx_end());
+ Value *NewASC;
if (Instruction *GEPI = dyn_cast<Instruction>(GEP)) {
// GEP = gep (addrspacecast X), indices
// =>
@@ -163,30 +164,31 @@ bool NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFromGEP(GEPOperator *GEP,
GEP->getSourceElementType(), Cast->getOperand(0), Indices,
"", GEPI);
NewGEP->setIsInBounds(GEP->isInBounds());
- Value *NewASC = new AddrSpaceCastInst(NewGEP, GEP->getType(), "", GEPI);
+ NewASC = new AddrSpaceCastInst(NewGEP, GEP->getType(), "", GEPI);
NewASC->takeName(GEP);
+ // Without RAUWing GEP, the compiler would visit GEP again and emit
+ // redundant instructions. This is exercised in test @rauw in
+ // access-non-generic.ll.
GEP->replaceAllUsesWith(NewASC);
} else {
// GEP is a constant expression.
Constant *NewGEP = ConstantExpr::getGetElementPtr(
GEP->getSourceElementType(), cast<Constant>(Cast->getOperand(0)),
Indices, GEP->isInBounds());
- GEP->replaceAllUsesWith(
- ConstantExpr::getAddrSpaceCast(NewGEP, GEP->getType()));
+ NewASC = ConstantExpr::getAddrSpaceCast(NewGEP, GEP->getType());
}
-
- return true;
+ return NewASC;
}
-bool NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFromBitCast(
+Value *NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFromBitCast(
BitCastOperator *BC, int Depth) {
- if (!hoistAddrSpaceCastFrom(BC->getOperand(0), Depth + 1))
- return false;
+ Value *NewOperand = hoistAddrSpaceCastFrom(BC->getOperand(0), Depth + 1);
+ if (NewOperand == nullptr)
+ return nullptr;
- // That hoistAddrSpaceCastFrom succeeds implies BC's source operand is now
- // an eliminable addrspacecast.
- assert(isEliminableAddrSpaceCast(BC->getOperand(0)));
- Operator *Cast = cast<Operator>(BC->getOperand(0));
+ // hoistAddrSpaceCastFrom returns an eliminable addrspacecast or nullptr.
+ assert(isEliminableAddrSpaceCast(NewOperand));
+ Operator *Cast = cast<Operator>(NewOperand);
// Cast = addrspacecast Src
// BC = bitcast Cast
@@ -197,31 +199,34 @@ bool NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFromBitCast(
Type *TypeOfNewCast =
PointerType::get(BC->getType()->getPointerElementType(),
Src->getType()->getPointerAddressSpace());
+ Value *NewBC;
if (BitCastInst *BCI = dyn_cast<BitCastInst>(BC)) {
Value *NewCast = new BitCastInst(Src, TypeOfNewCast, "", BCI);
- Value *NewBC = new AddrSpaceCastInst(NewCast, BC->getType(), "", BCI);
+ NewBC = new AddrSpaceCastInst(NewCast, BC->getType(), "", BCI);
NewBC->takeName(BC);
+ // Without RAUWing BC, the compiler would visit BC again and emit
+ // redundant instructions. This is exercised in test @rauw in
+ // access-non-generic.ll.
BC->replaceAllUsesWith(NewBC);
} else {
// BC is a constant expression.
Constant *NewCast =
ConstantExpr::getBitCast(cast<Constant>(Src), TypeOfNewCast);
- Constant *NewBC = ConstantExpr::getAddrSpaceCast(NewCast, BC->getType());
- BC->replaceAllUsesWith(NewBC);
+ NewBC = ConstantExpr::getAddrSpaceCast(NewCast, BC->getType());
}
- return true;
+ return NewBC;
}
-bool NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFrom(Value *V,
- int Depth) {
- // Returns true if V is already an eliminable addrspacecast.
+Value *NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFrom(Value *V,
+ int Depth) {
+ // Returns V if V is already an eliminable addrspacecast.
if (isEliminableAddrSpaceCast(V))
- return true;
+ return V;
// Limit the depth to prevent this recursive function from running too long.
const int MaxDepth = 20;
if (Depth >= MaxDepth)
- return false;
+ return nullptr;
// If V is a GEP or bitcast, hoist the addrspacecast if any from its pointer
// operand. This enables optimizeMemoryInstruction to shortcut addrspacecasts
@@ -232,28 +237,29 @@ bool NVPTXFavorNonGenericAddrSpaces::hoistAddrSpaceCastFrom(Value *V,
if (BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
return hoistAddrSpaceCastFromBitCast(BC, Depth);
- return false;
+ return nullptr;
}
bool NVPTXFavorNonGenericAddrSpaces::optimizeMemoryInstruction(Instruction *MI,
unsigned Idx) {
- if (hoistAddrSpaceCastFrom(MI->getOperand(Idx))) {
- // load/store (addrspacecast X) => load/store X if shortcutting the
- // addrspacecast is valid and can improve performance.
- //
- // e.g.,
- // %1 = addrspacecast float addrspace(3)* %0 to float*
- // %2 = load float* %1
- // ->
- // %2 = load float addrspace(3)* %0
- //
- // Note: the addrspacecast can also be a constant expression.
- assert(isEliminableAddrSpaceCast(MI->getOperand(Idx)));
- Operator *ASC = dyn_cast<Operator>(MI->getOperand(Idx));
- MI->setOperand(Idx, ASC->getOperand(0));
- return true;
- }
- return false;
+ Value *NewOperand = hoistAddrSpaceCastFrom(MI->getOperand(Idx));
+ if (NewOperand == nullptr)
+ return false;
+
+ // load/store (addrspacecast X) => load/store X if shortcutting the
+ // addrspacecast is valid and can improve performance.
+ //
+ // e.g.,
+ // %1 = addrspacecast float addrspace(3)* %0 to float*
+ // %2 = load float* %1
+ // ->
+ // %2 = load float addrspace(3)* %0
+ //
+ // Note: the addrspacecast can also be a constant expression.
+ assert(isEliminableAddrSpaceCast(NewOperand));
+ Operator *ASC = dyn_cast<Operator>(NewOperand);
+ MI->setOperand(Idx, ASC->getOperand(0));
+ return true;
}
bool NVPTXFavorNonGenericAddrSpaces::runOnFunction(Function &F) {
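
To summarize the refactor above: the hoist routines used to mutate the operand
in place and report success as a bool; they now return the hoisted
addrspacecast (or nullptr), so callers act on the returned value instead of
re-reading a mutated operand. A hand-written sketch of the underlying IR
rewrite, in the style of the pass's own examples (not taken from the patch):

    // Before hoisting: the cast feeds the GEP, which feeds the load.
    //   %c = addrspacecast float addrspace(3)* %p to float*
    //   %g = getelementptr float* %c, i64 4
    //   %v = load float* %g
    // After hoistAddrSpaceCastFromGEP and operand shortcutting:
    //   %g2 = getelementptr float addrspace(3)* %p, i64 4
    //   %v  = load float addrspace(3)* %g2   ; now uses the specific space
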
diff --git a/lib/Target/NVPTX/NVPTXFrameLowering.h b/lib/Target/NVPTX/NVPTXFrameLowering.h
index 14f8bb7b98fe..488edecc6e7b 100644
--- a/lib/Target/NVPTX/NVPTXFrameLowering.h
+++ b/lib/Target/NVPTX/NVPTXFrameLowering.h
@@ -31,6 +31,6 @@ public:
MachineBasicBlock::iterator I) const override;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
index fe20580c83a2..5879df31f8a6 100644
--- a/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
+++ b/lib/Target/NVPTX/NVPTXISelDAGToDAG.h
@@ -95,6 +95,6 @@ private:
bool ChkMemSDNodeAddressSpace(SDNode *N, unsigned int spN) const;
};
-}
+} // namespace
#endif
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.h b/lib/Target/NVPTX/NVPTXISelLowering.h
index ed94775b3002..276851f872ea 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -427,7 +427,7 @@ enum NodeType : unsigned {
Suld3DV4I16Zero,
Suld3DV4I32Zero
};
-}
+} // namespace NVPTXISD
class NVPTXSubtarget;
diff --git a/lib/Target/NVPTX/NVPTXImageOptimizer.cpp b/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
index aa36b6be7250..c86f861acd55 100644
--- a/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
+++ b/lib/Target/NVPTX/NVPTXImageOptimizer.cpp
@@ -42,7 +42,7 @@ private:
Value *cleanupValue(Value *V);
void replaceWith(Instruction *From, ConstantInt *To);
};
-}
+} // namespace
char NVPTXImageOptimizer::ID = 0;
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/lib/Target/NVPTX/NVPTXInstrInfo.cpp
index dabc3be43a3a..76d6597c6e20 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.cpp
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.cpp
@@ -248,7 +248,7 @@ unsigned NVPTXInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
unsigned NVPTXInstrInfo::InsertBranch(
MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
+ ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.h b/lib/Target/NVPTX/NVPTXInstrInfo.h
index 9b5d491dfeb3..179c06887198 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.h
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.h
@@ -66,7 +66,7 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(
MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const override;
+ ArrayRef<MachineOperand> Cond, DebugLoc DL) const override;
unsigned getLdStCodeAddrSpace(const MachineInstr &MI) const {
return MI.getOperand(2).getImm();
}
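
The SmallVectorImpl-to-ArrayRef change in the two InsertBranch signatures
above (and in the PPC hooks later in this patch) follows a tree-wide cleanup
of read-only parameters. A standalone sketch of why ArrayRef is preferred
(illustrative code, not from the patch):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include <vector>

    static int sum(llvm::ArrayRef<int> Xs) {
      int S = 0;
      for (int X : Xs)
        S += X;
      return S;
    }

    // ArrayRef is a non-owning (pointer, length) view, so one signature
    // accepts many containers without copying:
    //   llvm::SmallVector<int, 4> SV; std::vector<int> V;
    //   sum(SV); sum(V); sum({5, 6});
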
diff --git a/lib/Target/NVPTX/NVPTXLowerAlloca.cpp b/lib/Target/NVPTX/NVPTXLowerAlloca.cpp
new file mode 100644
index 000000000000..93d0025d8f53
--- /dev/null
+++ b/lib/Target/NVPTX/NVPTXLowerAlloca.cpp
@@ -0,0 +1,115 @@
+//===-- NVPTXLowerAlloca.cpp - Make allocas use local memory ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// For each alloca instruction, add a pair of addrspacecasts through the
+// local address space. For example,
+//
+// %A = alloca i32
+// store i32 0, i32* %A ; emits st.u32
+//
+// will be transformed to
+//
+// %A = alloca i32
+// %Local = addrspacecast i32* %A to i32 addrspace(5)*
+// %Generic = addrspacecast i32 addrspace(5)* %Local to i32*
+// store i32 0, i32* %Generic
+//
+// We then rely on NVPTXFavorNonGenericAddrSpace to combine the two casts
+// into the store, which then emits st.local.u32.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTX.h"
+#include "NVPTXUtilities.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Pass.h"
+
+using namespace llvm;
+
+namespace llvm {
+void initializeNVPTXLowerAllocaPass(PassRegistry &);
+}
+
+namespace {
+class NVPTXLowerAlloca : public BasicBlockPass {
+ bool runOnBasicBlock(BasicBlock &BB) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ NVPTXLowerAlloca() : BasicBlockPass(ID) {}
+ const char *getPassName() const override {
+ return "convert address space of alloca'ed memory to local";
+ }
+};
+} // namespace
+
+char NVPTXLowerAlloca::ID = 1;
+
+INITIALIZE_PASS(NVPTXLowerAlloca, "nvptx-lower-alloca",
+ "Lower Alloca", false, false)
+
+// =============================================================================
+// Main function for this pass.
+// =============================================================================
+bool NVPTXLowerAlloca::runOnBasicBlock(BasicBlock &BB) {
+ bool Changed = false;
+ for (auto &I : BB) {
+ if (auto allocaInst = dyn_cast<AllocaInst>(&I)) {
+ Changed = true;
+ auto PTy = dyn_cast<PointerType>(allocaInst->getType());
+ auto ETy = PTy->getElementType();
+ auto LocalAddrTy = PointerType::get(ETy, ADDRESS_SPACE_LOCAL);
+ auto NewASCToLocal = new AddrSpaceCastInst(allocaInst, LocalAddrTy, "");
+ auto GenericAddrTy = PointerType::get(ETy, ADDRESS_SPACE_GENERIC);
+ auto NewASCToGeneric = new AddrSpaceCastInst(NewASCToLocal,
+ GenericAddrTy, "");
+ NewASCToLocal->insertAfter(allocaInst);
+ NewASCToGeneric->insertAfter(NewASCToLocal);
+ for (Value::use_iterator UI = allocaInst->use_begin(),
+ UE = allocaInst->use_end();
+ UI != UE; ) {
+ // Check load, store, GEP, and bitcast uses of the alloca and make them
+ // use the converted generic address, in order to expose the non-generic
+ // addrspacecast to NVPTXFavorNonGenericAddrSpace. For other kinds of
+ // instructions this is unnecessary and may introduce redundant address
+ // casts.
+ const auto &AllocaUse = *UI++;
+ auto LI = dyn_cast<LoadInst>(AllocaUse.getUser());
+ if (LI && LI->getPointerOperand() == allocaInst && !LI->isVolatile()) {
+ LI->setOperand(LI->getPointerOperandIndex(), NewASCToGeneric);
+ continue;
+ }
+ auto SI = dyn_cast<StoreInst>(AllocaUse.getUser());
+ if (SI && SI->getPointerOperand() == allocaInst && !SI->isVolatile()) {
+ SI->setOperand(SI->getPointerOperandIndex(), NewASCToGeneric);
+ continue;
+ }
+ auto GI = dyn_cast<GetElementPtrInst>(AllocaUse.getUser());
+ if (GI && GI->getPointerOperand() == allocaInst) {
+ GI->setOperand(GI->getPointerOperandIndex(), NewASCToGeneric);
+ continue;
+ }
+ auto BI = dyn_cast<BitCastInst>(AllocaUse.getUser());
+ if (BI && BI->getOperand(0) == allocaInst) {
+ BI->setOperand(0, NewASCToGeneric);
+ continue;
+ }
+ }
+ }
+ }
+ return Changed;
+}
+
+BasicBlockPass *llvm::createNVPTXLowerAllocaPass() {
+ return new NVPTXLowerAlloca();
+}
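
One detail worth noting in the new pass's use-walk: the iterator is advanced
before a use is modified, because setOperand unlinks the use from the
alloca's use list. A minimal standalone sketch of that idiom (hypothetical
helper; only the idiom is taken from the code above):

    #include "llvm/IR/Use.h"
    #include "llvm/IR/Value.h"
    using namespace llvm;

    static void redirectUses(Value *Old, Value *New) {
      for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
           UI != UE;) {
        Use &U = *UI++; // Advance first: U.set() removes U from Old's list.
        U.set(New);     // Redirect this use to the replacement value.
      }
    }

Note the pass cannot redirect blindly like this sketch: the two new casts are
themselves users of the alloca, so it rewrites only load, store, GEP, and
bitcast users.
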
diff --git a/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h b/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h
index 10f1135ad841..4b9322c77a40 100644
--- a/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h
+++ b/lib/Target/NVPTX/NVPTXMachineFunctionInfo.h
@@ -46,6 +46,6 @@ public:
return ImageHandleList[Idx].c_str();
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
index 5fd69a6815a8..ea58f7787489 100644
--- a/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
+++ b/lib/Target/NVPTX/NVPTXPrologEpilogPass.cpp
@@ -39,7 +39,7 @@ public:
private:
void calculateFrameObjectOffsets(MachineFunction &Fn);
};
-}
+} // namespace
MachineFunctionPass *llvm::createNVPTXPrologEpilogPass() {
return new NVPTXPrologEpilogPass();
diff --git a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
index 6e97f9efbc27..3ef997b006fa 100644
--- a/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
+++ b/lib/Target/NVPTX/NVPTXRegisterInfo.cpp
@@ -69,7 +69,7 @@ std::string getNVPTXRegClassStr(TargetRegisterClass const *RC) {
}
return "";
}
-}
+} // namespace llvm
NVPTXRegisterInfo::NVPTXRegisterInfo() : NVPTXGenRegisterInfo(0) {}
diff --git a/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp b/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
index e83f735a551e..bb0adc59a3fd 100644
--- a/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
+++ b/lib/Target/NVPTX/NVPTXReplaceImageHandles.cpp
@@ -45,7 +45,7 @@ private:
bool findIndexForHandle(MachineOperand &Op, MachineFunction &MF,
unsigned &Idx);
};
-}
+} // namespace
char NVPTXReplaceImageHandles::ID = 0;
diff --git a/lib/Target/NVPTX/NVPTXSubtarget.cpp b/lib/Target/NVPTX/NVPTXSubtarget.cpp
index 069d6e179dde..71645dca69c5 100644
--- a/lib/Target/NVPTX/NVPTXSubtarget.cpp
+++ b/lib/Target/NVPTX/NVPTXSubtarget.cpp
@@ -43,7 +43,7 @@ NVPTXSubtarget &NVPTXSubtarget::initializeSubtargetDependencies(StringRef CPU,
return *this;
}
-NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU,
+NVPTXSubtarget::NVPTXSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS,
const NVPTXTargetMachine &TM)
: NVPTXGenSubtargetInfo(TT, CPU, FS), PTXVersion(0), SmVersion(20), TM(TM),
diff --git a/lib/Target/NVPTX/NVPTXSubtarget.h b/lib/Target/NVPTX/NVPTXSubtarget.h
index e9833e5823c3..d4520451d37d 100644
--- a/lib/Target/NVPTX/NVPTXSubtarget.h
+++ b/lib/Target/NVPTX/NVPTXSubtarget.h
@@ -52,7 +52,7 @@ public:
/// This constructor initializes the data members to match that
/// of the specified module.
///
- NVPTXSubtarget(const std::string &TT, const std::string &CPU,
+ NVPTXSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const NVPTXTargetMachine &TM);
const TargetFrameLowering *getFrameLowering() const override {
@@ -103,6 +103,6 @@ public:
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index a6466687bc7b..c071ee82abc6 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -54,6 +54,7 @@ void initializeNVPTXAllocaHoistingPass(PassRegistry &);
void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry&);
void initializeNVPTXFavorNonGenericAddrSpacesPass(PassRegistry &);
void initializeNVPTXLowerKernelArgsPass(PassRegistry &);
+void initializeNVPTXLowerAllocaPass(PassRegistry &);
}
extern "C" void LLVMInitializeNVPTXTarget() {
@@ -70,6 +71,7 @@ extern "C" void LLVMInitializeNVPTXTarget() {
initializeNVPTXFavorNonGenericAddrSpacesPass(
*PassRegistry::getPassRegistry());
initializeNVPTXLowerKernelArgsPass(*PassRegistry::getPassRegistry());
+ initializeNVPTXLowerAllocaPass(*PassRegistry::getPassRegistry());
}
static std::string computeDataLayout(bool is64Bit) {
@@ -83,7 +85,7 @@ static std::string computeDataLayout(bool is64Bit) {
return Ret;
}
-NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, StringRef TT,
+NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
@@ -92,7 +94,7 @@ NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, StringRef TT,
CM, OL),
is64bit(is64bit), TLOF(make_unique<NVPTXTargetObjectFile>()),
Subtarget(TT, CPU, FS, *this) {
- if (Triple(TT).getOS() == Triple::NVCL)
+ if (TT.getOS() == Triple::NVCL)
drvInterface = NVPTX::NVCL;
else
drvInterface = NVPTX::CUDA;
@@ -103,18 +105,20 @@ NVPTXTargetMachine::~NVPTXTargetMachine() {}
void NVPTXTargetMachine32::anchor() {}
-NVPTXTargetMachine32::NVPTXTargetMachine32(
- const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
+NVPTXTargetMachine32::NVPTXTargetMachine32(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
: NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
void NVPTXTargetMachine64::anchor() {}
-NVPTXTargetMachine64::NVPTXTargetMachine64(
- const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL)
+NVPTXTargetMachine64::NVPTXTargetMachine64(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
: NVPTXTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}
namespace {
@@ -164,12 +168,11 @@ void NVPTXPassConfig::addIRPasses() {
addPass(createNVPTXAssignValidGlobalNamesPass());
addPass(createGenericToNVVMPass());
addPass(createNVPTXLowerKernelArgsPass(&getNVPTXTargetMachine()));
- addPass(createNVPTXFavorNonGenericAddrSpacesPass());
// NVPTXLowerKernelArgs emits alloca for byval parameters which can often
- // be eliminated by SROA. We do not run SROA right after NVPTXLowerKernelArgs
- // because we plan to merge NVPTXLowerKernelArgs and
- // NVPTXFavorNonGenericAddrSpaces into one pass.
+ // be eliminated by SROA.
addPass(createSROAPass());
+ addPass(createNVPTXLowerAllocaPass());
+ addPass(createNVPTXFavorNonGenericAddrSpacesPass());
// FavorNonGenericAddrSpaces shortcuts unnecessary addrspacecasts, and leaves
// them unused. We could remove dead code in an ad-hoc manner, but that
// requires manual work and might be error-prone.
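
Putting the NVPTX IR pipeline changes together: kernel-arg lowering creates
allocas for byval arguments, SROA deletes most of them, the new alloca pass
tags the survivors with local-space casts, and only then does the
favor-non-generic pass fold the casts into loads and stores. The resulting
order, annotated (the calls are from the hunk above; comments are editorial):

    // addPass(createNVPTXLowerKernelArgsPass(TM)); // byval args -> allocas
    // addPass(createSROAPass());                   // deletes most of them
    // addPass(createNVPTXLowerAllocaPass());       // survivors -> local casts
    // addPass(createNVPTXFavorNonGenericAddrSpacesPass()); // fold into ld/st
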
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.h b/lib/Target/NVPTX/NVPTXTargetMachine.h
index 2cd10e87f620..da7f62bf9d9b 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.h
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.h
@@ -34,9 +34,10 @@ class NVPTXTargetMachine : public LLVMTargetMachine {
ManagedStringPool ManagedStrPool;
public:
- NVPTXTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM,
- CodeModel::Model CM, CodeGenOpt::Level OP, bool is64bit);
+ NVPTXTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OP,
+ bool is64bit);
~NVPTXTargetMachine() override;
const NVPTXSubtarget *getSubtargetImpl(const Function &) const override {
@@ -67,7 +68,7 @@ public:
class NVPTXTargetMachine32 : public NVPTXTargetMachine {
virtual void anchor();
public:
- NVPTXTargetMachine32(const Target &T, StringRef TT, StringRef CPU,
+ NVPTXTargetMachine32(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
@@ -76,7 +77,7 @@ public:
class NVPTXTargetMachine64 : public NVPTXTargetMachine {
virtual void anchor();
public:
- NVPTXTargetMachine64(const Target &T, StringRef TT, StringRef CPU,
+ NVPTXTargetMachine64(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
diff --git a/lib/Target/NVPTX/NVPTXUtilities.h b/lib/Target/NVPTX/NVPTXUtilities.h
index 7e2ce73daaa3..4d937c6a8bec 100644
--- a/lib/Target/NVPTX/NVPTXUtilities.h
+++ b/lib/Target/NVPTX/NVPTXUtilities.h
@@ -91,6 +91,6 @@ void dumpInstRec(Value *v, std::set<Instruction *> *visited);
void dumpInstRec(Value *v);
void dumpParent(Value *v);
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/NVPTX/NVVMReflect.cpp b/lib/Target/NVPTX/NVVMReflect.cpp
index 5e375b7852e4..1c2043069e1e 100644
--- a/lib/Target/NVPTX/NVVMReflect.cpp
+++ b/lib/Target/NVPTX/NVVMReflect.cpp
@@ -75,7 +75,7 @@ private:
bool handleFunction(Function *ReflectFunction);
void setVarMap();
};
-}
+} // namespace
ModulePass *llvm::createNVVMReflectPass() {
return new NVVMReflect();
diff --git a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
index 1736d03961f7..a699a55d3cbf 100644
--- a/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
+++ b/lib/Target/PowerPC/AsmParser/PPCAsmParser.cpp
@@ -1184,6 +1184,13 @@ void PPCAsmParser::ProcessInstruction(MCInst &Inst,
Inst = TmpInst;
break;
}
+ case PPC::MFTB: {
+ if (STI.getFeatureBits()[PPC::FeatureMFTB]) {
+ assert(Inst.getNumOperands() == 2 && "Expecting two operands");
+ Inst.setOpcode(PPC::MFSPR);
+ }
+ break;
+ }
}
}
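
Background for the new MFTB case: on subtargets with FeatureMFTB, mftb is
implemented via mfspr, and the time-base registers are SPRs 268 (TBL) and
269 (TBU). The parsed MFTB already carries ($RT, $SPR) in MFSPR's operand
order, so only the opcode needs to change. The equivalence, sketched
(editorial, not from the patch):

    // mftb  r3   <=>  mfspr r3, 268   ; 268 = TBL, lower time base
    // mftbu r3   <=>  mfspr r3, 269   ; 269 = TBU, upper time base
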
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
index 72742dc3ee20..b6dd595ffb0e 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
@@ -230,11 +230,11 @@ namespace {
MCAsmBackend *llvm::createPPCAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
- if (Triple(TT).isOSDarwin())
+ const Triple &TT, StringRef CPU) {
+ if (TT.isOSDarwin())
return new DarwinPPCAsmBackend(T);
- uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
- bool IsLittleEndian = Triple(TT).getArch() == Triple::ppc64le;
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
+ bool IsLittleEndian = TT.getArch() == Triple::ppc64le;
return new ELFPPCAsmBackend(T, IsLittleEndian, OSABI);
}
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
index 992be5b966c1..36119d5d7e46 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
@@ -31,7 +31,7 @@ namespace {
bool needsRelocateWithSymbol(const MCSymbol &Sym,
unsigned Type) const override;
};
-}
+} // namespace
PPCELFObjectWriter::PPCELFObjectWriter(bool Is64Bit, uint8_t OSABI)
: MCELFObjectTargetWriter(Is64Bit, OSABI,
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h b/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
index ae43e59d3cb1..ad614f2ddf35 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h
@@ -50,7 +50,7 @@ enum Fixups {
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
};
-}
-}
+} // namespace PPC
+} // namespace llvm
#endif
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
index 95379246f301..b7291561c75d 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp
@@ -309,7 +309,7 @@ unsigned PPCMCCodeEmitter::getTLSRegEncoding(const MCInst &MI, unsigned OpNo,
// Return the thread-pointer register's encoding.
Fixups.push_back(MCFixup::create(0, MO.getExpr(),
(MCFixupKind)PPC::fixup_ppc_nofixup));
- Triple TT(STI.getTargetTriple());
+ const Triple &TT = STI.getTargetTriple();
bool isPPC64 = TT.getArch() == Triple::ppc64 || TT.getArch() == Triple::ppc64le;
return CTX.getRegisterInfo()->getEncodingValue(isPPC64 ? PPC::X13 : PPC::R2);
}
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
index 1e8e8046669d..489905b26fcc 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
@@ -63,8 +63,8 @@ static MCRegisterInfo *createPPCMCRegisterInfo(StringRef TT) {
return X;
}
-static MCSubtargetInfo *createPPCMCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
+static MCSubtargetInfo *createPPCMCSubtargetInfo(const Triple &TT,
+ StringRef CPU, StringRef FS) {
MCSubtargetInfo *X = new MCSubtargetInfo();
InitPPCMCSubtargetInfo(X, TT, CPU, FS);
return X;
@@ -219,7 +219,7 @@ public:
llvm_unreachable("Unknown pseudo-op: .localentry");
}
};
-}
+} // namespace
static MCTargetStreamer *createAsmTargetStreamer(MCStreamer &S,
formatted_raw_ostream &OS,
@@ -230,7 +230,7 @@ static MCTargetStreamer *createAsmTargetStreamer(MCStreamer &S,
static MCTargetStreamer *
createObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) {
- Triple TT(STI.getTargetTriple());
+ const Triple &TT = STI.getTargetTriple();
if (TT.getObjectFormat() == Triple::ELF)
return new PPCTargetELFStreamer(S);
return new PPCTargetMachOStreamer(S);
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
index 5f2117c88e46..18818a1c335e 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h
@@ -29,6 +29,7 @@ class MCObjectWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
class Target;
+class Triple;
class StringRef;
class raw_pwrite_stream;
class raw_ostream;
@@ -42,7 +43,7 @@ MCCodeEmitter *createPPCMCCodeEmitter(const MCInstrInfo &MCII,
MCContext &Ctx);
MCAsmBackend *createPPCAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+ const Triple &TT, StringRef CPU);
/// Construct a PPC ELF object writer.
MCObjectWriter *createPPCELFObjectWriter(raw_pwrite_stream &OS, bool Is64Bit,
@@ -80,7 +81,7 @@ static inline bool isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME) {
return false;
}
-} // End llvm namespace
+} // namespace llvm
// Generated files will use "namespace PPC". To avoid symbol clash,
// undefine PPC here. PPC may be predefined on some hosts.
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
index 9d7289658f0f..9b5491f92491 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMachObjectWriter.cpp
@@ -51,7 +51,7 @@ public:
FixedValue);
}
};
-}
+} // namespace
/// computes the log2 of the size of the relocation,
/// used for relocation_info::r_length.
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
index 6075631a541f..ff9b059d906a 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCPredicates.h
@@ -62,7 +62,7 @@ namespace PPC {
/// Assume the condition register is set by MI(a,b), return the predicate if
/// we modify the instructions such that condition register is set by MI(b,a).
Predicate getSwappedPredicate(Predicate Opcode);
-}
-}
+} // namespace PPC
+} // namespace llvm
#endif
diff --git a/lib/Target/PowerPC/PPC.h b/lib/Target/PowerPC/PPC.h
index ae8d8b4f5dfe..49f77b538c1b 100644
--- a/lib/Target/PowerPC/PPC.h
+++ b/lib/Target/PowerPC/PPC.h
@@ -98,6 +98,6 @@ namespace llvm {
};
} // end namespace PPCII
-} // end namespace llvm;
+} // namespace llvm
#endif
diff --git a/lib/Target/PowerPC/PPC.td b/lib/Target/PowerPC/PPC.td
index 1a02bcca9362..641b2377de40 100644
--- a/lib/Target/PowerPC/PPC.td
+++ b/lib/Target/PowerPC/PPC.td
@@ -135,9 +135,9 @@ def FeatureInvariantFunctionDescriptors :
"Assume function descriptors are invariant">;
def FeatureHTM : SubtargetFeature<"htm", "HasHTM", "true",
"Enable Hardware Transactional Memory instructions">;
+def FeatureMFTB : SubtargetFeature<"", "FeatureMFTB", "true",
+ "Implement mftb using the mfspr instruction">;
-def DeprecatedMFTB : SubtargetFeature<"", "DeprecatedMFTB", "true",
- "Treat mftb as deprecated">;
def DeprecatedDST : SubtargetFeature<"", "DeprecatedDST", "true",
"Treat vector data stream cache control instructions as deprecated">;
@@ -165,7 +165,7 @@ def ProcessorFeatures {
FeaturePOPCNTD, FeatureCMPB, FeatureLDBRX,
Feature64Bit /*, Feature64BitRegs */,
FeatureBPERMD, FeatureExtDiv,
- DeprecatedMFTB, DeprecatedDST];
+ FeatureMFTB, DeprecatedDST];
list<SubtargetFeature> Power8SpecificFeatures =
[DirectivePwr8, FeatureP8Altivec, FeatureP8Vector, FeatureP8Crypto,
FeatureHTM, FeatureDirectMove, FeatureICBT, FeaturePartwordAtomic];
@@ -247,61 +247,75 @@ include "PPCInstrInfo.td"
// PowerPC processors supported.
//
-def : Processor<"generic", G3Itineraries, [Directive32]>;
+def : Processor<"generic", G3Itineraries, [Directive32, FeatureMFTB]>;
def : ProcessorModel<"440", PPC440Model, [Directive440, FeatureISEL,
FeatureFRES, FeatureFRSQRTE,
FeatureICBT, FeatureBookE,
- FeatureMSYNC, DeprecatedMFTB]>;
+ FeatureMSYNC, FeatureMFTB]>;
def : ProcessorModel<"450", PPC440Model, [Directive440, FeatureISEL,
FeatureFRES, FeatureFRSQRTE,
FeatureICBT, FeatureBookE,
- FeatureMSYNC, DeprecatedMFTB]>;
+ FeatureMSYNC, FeatureMFTB]>;
def : Processor<"601", G3Itineraries, [Directive601]>;
-def : Processor<"602", G3Itineraries, [Directive602]>;
+def : Processor<"602", G3Itineraries, [Directive602,
+ FeatureMFTB]>;
def : Processor<"603", G3Itineraries, [Directive603,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : Processor<"603e", G3Itineraries, [Directive603,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : Processor<"603ev", G3Itineraries, [Directive603,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : Processor<"604", G3Itineraries, [Directive604,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : Processor<"604e", G3Itineraries, [Directive604,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : Processor<"620", G3Itineraries, [Directive620,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : Processor<"750", G4Itineraries, [Directive750,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : Processor<"g3", G3Itineraries, [Directive750,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : Processor<"7400", G4Itineraries, [Directive7400, FeatureAltivec,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : Processor<"g4", G4Itineraries, [Directive7400, FeatureAltivec,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : Processor<"7450", G4PlusItineraries, [Directive7400, FeatureAltivec,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : Processor<"g4+", G4PlusItineraries, [Directive7400, FeatureAltivec,
- FeatureFRES, FeatureFRSQRTE]>;
+ FeatureFRES, FeatureFRSQRTE,
+ FeatureMFTB]>;
def : ProcessorModel<"970", G5Model,
[Directive970, FeatureAltivec,
FeatureMFOCRF, FeatureFSqrt,
FeatureFRES, FeatureFRSQRTE, FeatureSTFIWX,
- Feature64Bit /*, Feature64BitRegs */]>;
+ Feature64Bit /*, Feature64BitRegs */,
+ FeatureMFTB]>;
def : ProcessorModel<"g5", G5Model,
[Directive970, FeatureAltivec,
FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX,
FeatureFRES, FeatureFRSQRTE,
Feature64Bit /*, Feature64BitRegs */,
- DeprecatedMFTB, DeprecatedDST]>;
+ FeatureMFTB, DeprecatedDST]>;
def : ProcessorModel<"e500mc", PPCE500mcModel,
[DirectiveE500mc, FeatureMFOCRF,
FeatureSTFIWX, FeatureICBT, FeatureBookE,
- FeatureISEL, DeprecatedMFTB]>;
+ FeatureISEL, FeatureMFTB]>;
def : ProcessorModel<"e5500", PPCE5500Model,
[DirectiveE5500, FeatureMFOCRF, Feature64Bit,
FeatureSTFIWX, FeatureICBT, FeatureBookE,
- FeatureISEL, DeprecatedMFTB]>;
+ FeatureISEL, FeatureMFTB]>;
def : ProcessorModel<"a2", PPCA2Model,
[DirectiveA2, FeatureICBT, FeatureBookE, FeatureMFOCRF,
FeatureFCPSGN, FeatureFSqrt, FeatureFRE, FeatureFRES,
@@ -309,7 +323,7 @@ def : ProcessorModel<"a2", PPCA2Model,
FeatureSTFIWX, FeatureLFIWAX,
FeatureFPRND, FeatureFPCVT, FeatureISEL,
FeaturePOPCNTD, FeatureCMPB, FeatureLDBRX, Feature64Bit
- /*, Feature64BitRegs */, DeprecatedMFTB]>;
+ /*, Feature64BitRegs */, FeatureMFTB]>;
def : ProcessorModel<"a2q", PPCA2Model,
[DirectiveA2, FeatureICBT, FeatureBookE, FeatureMFOCRF,
FeatureFCPSGN, FeatureFSqrt, FeatureFRE, FeatureFRES,
@@ -317,7 +331,7 @@ def : ProcessorModel<"a2q", PPCA2Model,
FeatureSTFIWX, FeatureLFIWAX,
FeatureFPRND, FeatureFPCVT, FeatureISEL,
FeaturePOPCNTD, FeatureCMPB, FeatureLDBRX, Feature64Bit
- /*, Feature64BitRegs */, FeatureQPX, DeprecatedMFTB]>;
+ /*, Feature64BitRegs */, FeatureQPX, FeatureMFTB]>;
def : ProcessorModel<"pwr3", G5Model,
[DirectivePwr3, FeatureAltivec,
FeatureFRES, FeatureFRSQRTE, FeatureMFOCRF,
@@ -325,41 +339,42 @@ def : ProcessorModel<"pwr3", G5Model,
def : ProcessorModel<"pwr4", G5Model,
[DirectivePwr4, FeatureAltivec, FeatureMFOCRF,
FeatureFSqrt, FeatureFRES, FeatureFRSQRTE,
- FeatureSTFIWX, Feature64Bit]>;
+ FeatureSTFIWX, Feature64Bit, FeatureMFTB]>;
def : ProcessorModel<"pwr5", G5Model,
[DirectivePwr5, FeatureAltivec, FeatureMFOCRF,
FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES,
FeatureSTFIWX, Feature64Bit,
- DeprecatedMFTB, DeprecatedDST]>;
+ FeatureMFTB, DeprecatedDST]>;
def : ProcessorModel<"pwr5x", G5Model,
[DirectivePwr5x, FeatureAltivec, FeatureMFOCRF,
FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES,
FeatureSTFIWX, FeatureFPRND, Feature64Bit,
- DeprecatedMFTB, DeprecatedDST]>;
+ FeatureMFTB, DeprecatedDST]>;
def : ProcessorModel<"pwr6", G5Model,
[DirectivePwr6, FeatureAltivec,
FeatureMFOCRF, FeatureFCPSGN, FeatureFSqrt, FeatureFRE,
FeatureFRES, FeatureFRSQRTE, FeatureFRSQRTES,
FeatureRecipPrec, FeatureSTFIWX, FeatureLFIWAX, FeatureCMPB,
FeatureFPRND, Feature64Bit /*, Feature64BitRegs */,
- DeprecatedMFTB, DeprecatedDST]>;
+ FeatureMFTB, DeprecatedDST]>;
def : ProcessorModel<"pwr6x", G5Model,
[DirectivePwr5x, FeatureAltivec, FeatureMFOCRF,
FeatureFCPSGN, FeatureFSqrt, FeatureFRE, FeatureFRES,
FeatureFRSQRTE, FeatureFRSQRTES, FeatureRecipPrec,
FeatureSTFIWX, FeatureLFIWAX, FeatureCMPB,
FeatureFPRND, Feature64Bit,
- DeprecatedMFTB, DeprecatedDST]>;
+ FeatureMFTB, DeprecatedDST]>;
def : ProcessorModel<"pwr7", P7Model, ProcessorFeatures.Power7FeatureList>;
def : ProcessorModel<"pwr8", P8Model, ProcessorFeatures.Power8FeatureList>;
-def : Processor<"ppc", G3Itineraries, [Directive32]>;
+def : Processor<"ppc", G3Itineraries, [Directive32, FeatureMFTB]>;
def : ProcessorModel<"ppc64", G5Model,
[Directive64, FeatureAltivec,
FeatureMFOCRF, FeatureFSqrt, FeatureFRES,
FeatureFRSQRTE, FeatureSTFIWX,
- Feature64Bit /*, Feature64BitRegs */]>;
+ Feature64Bit /*, Feature64BitRegs */,
+ FeatureMFTB]>;
def : ProcessorModel<"ppc64le", P8Model, ProcessorFeatures.Power8FeatureList>;
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp
index b42b0f9ef478..87a5236e711f 100644
--- a/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -440,7 +440,7 @@ void PPCAsmPrinter::EmitTlsCall(const MachineInstr *MI,
void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCInst TmpInst;
bool isPPC64 = Subtarget->isPPC64();
- bool isDarwin = Triple(TM.getTargetTriple()).isOSDarwin();
+ bool isDarwin = TM.getTargetTriple().isOSDarwin();
const Module *M = MF->getFunction()->getParent();
PICLevel::Level PL = M->getPICLevel();
@@ -1276,7 +1276,8 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
// freed) and since we're at the global level we can use the default
// constructed subtarget.
std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo(
- TM.getTargetTriple(), TM.getTargetCPU(), TM.getTargetFeatureString()));
+ TM.getTargetTriple().str(), TM.getTargetCPU(),
+ TM.getTargetFeatureString()));
auto EmitToStreamer = [&STI] (MCStreamer &S, const MCInst &Inst) {
S.EmitInstruction(Inst, *STI);
};
@@ -1510,7 +1511,7 @@ bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
static AsmPrinter *
createPPCAsmPrinterPass(TargetMachine &tm,
std::unique_ptr<MCStreamer> &&Streamer) {
- if (Triple(tm.getTargetTriple()).isMacOSX())
+ if (tm.getTargetTriple().isMacOSX())
return new PPCDarwinAsmPrinter(tm, std::move(Streamer));
return new PPCLinuxAsmPrinter(tm, std::move(Streamer));
}
diff --git a/lib/Target/PowerPC/PPCBranchSelector.cpp b/lib/Target/PowerPC/PPCBranchSelector.cpp
index 940d55ac1f36..2b6030aea2b1 100644
--- a/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -51,7 +51,7 @@ namespace {
}
};
char PPCBSel::ID = 0;
-}
+} // namespace
INITIALIZE_PASS(PPCBSel, "ppc-branch-select", "PowerPC Branch Selector",
false, false)
diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp
index 69afd681d404..416131745806 100644
--- a/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -417,8 +417,8 @@ bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
bool MadeChange = false;
- Triple TT = Triple(L->getHeader()->getParent()->getParent()->
- getTargetTriple());
+ const Triple TT =
+ Triple(L->getHeader()->getParent()->getParent()->getTargetTriple());
if (!TT.isArch32Bit() && !TT.isArch64Bit())
return MadeChange; // Unknown arch. type.
diff --git a/lib/Target/PowerPC/PPCCallingConv.h b/lib/Target/PowerPC/PPCCallingConv.h
index eb904a858592..550cac62927e 100644
--- a/lib/Target/PowerPC/PPCCallingConv.h
+++ b/lib/Target/PowerPC/PPCCallingConv.h
@@ -29,7 +29,7 @@ inline bool CC_PPC_AnyReg_Error(unsigned &, MVT &, MVT &,
return false;
}
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/PowerPC/PPCEarlyReturn.cpp b/lib/Target/PowerPC/PPCEarlyReturn.cpp
index fc89753ed94e..9cd9c2faa51f 100644
--- a/lib/Target/PowerPC/PPCEarlyReturn.cpp
+++ b/lib/Target/PowerPC/PPCEarlyReturn.cpp
@@ -191,7 +191,7 @@ public:
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
INITIALIZE_PASS(PPCEarlyReturn, DEBUG_TYPE,
"PowerPC Early-Return Creation", false, false)
diff --git a/lib/Target/PowerPC/PPCFastISel.cpp b/lib/Target/PowerPC/PPCFastISel.cpp
index a561d5b1190a..82ff5307d0b7 100644
--- a/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/lib/Target/PowerPC/PPCFastISel.cpp
@@ -2347,4 +2347,4 @@ namespace llvm {
return new PPCFastISel(FuncInfo, LibInfo);
return nullptr;
}
-}
+} // namespace llvm
diff --git a/lib/Target/PowerPC/PPCFrameLowering.h b/lib/Target/PowerPC/PPCFrameLowering.h
index 28d074ecd79d..b232863c9614 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.h
+++ b/lib/Target/PowerPC/PPCFrameLowering.h
@@ -93,6 +93,6 @@ public:
const SpillSlot *
getCalleeSavedSpillSlots(unsigned &NumEntries) const override;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index afc1f36ad152..5f9f9f2e341f 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -234,7 +234,7 @@ private:
SDNode *transferMemOperands(SDNode *N, SDNode *Result);
};
-}
+} // namespace
/// InsertVRSaveCode - Once the entire function has been instruction selected,
/// all virtual registers are created and all machine instructions are built,
@@ -1301,12 +1301,9 @@ class BitPermutationSelector {
// Now, remove all groups with this underlying value and rotation
// factor.
- for (auto I = BitGroups.begin(); I != BitGroups.end();) {
- if (I->V == VRI.V && I->RLAmt == VRI.RLAmt)
- I = BitGroups.erase(I);
- else
- ++I;
- }
+ eraseMatchingBitGroups([VRI](const BitGroup &BG) {
+ return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt;
+ });
}
}
@@ -1337,12 +1334,9 @@ class BitPermutationSelector {
}
// Now, remove all groups with this underlying value and rotation factor.
- for (auto I = BitGroups.begin(); I != BitGroups.end();) {
- if (I->V == VRI.V && I->RLAmt == VRI.RLAmt)
- I = BitGroups.erase(I);
- else
- ++I;
- }
+ eraseMatchingBitGroups([VRI](const BitGroup &BG) {
+ return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt;
+ });
}
if (InstCnt) *InstCnt += BitGroups.size();
@@ -1544,7 +1538,7 @@ class BitPermutationSelector {
// Repl32 true, but are trivially convertible to Repl32 false. Such a
// group is trivially convertible if it overlaps only with the lower 32
// bits, and the group has not been coalesced.
- auto MatchingBG = [VRI](BitGroup &BG) {
+ auto MatchingBG = [VRI](const BitGroup &BG) {
if (VRI.V != BG.V)
return false;
@@ -1675,12 +1669,7 @@ class BitPermutationSelector {
// Now, remove all groups with this underlying value and rotation
// factor.
- for (auto I = BitGroups.begin(); I != BitGroups.end();) {
- if (MatchingBG(*I))
- I = BitGroups.erase(I);
- else
- ++I;
- }
+ eraseMatchingBitGroups(MatchingBG);
}
}
@@ -1740,12 +1729,10 @@ class BitPermutationSelector {
// Now, remove all groups with this underlying value and rotation factor.
if (Res)
- for (auto I = BitGroups.begin(); I != BitGroups.end();) {
- if (I->V == VRI.V && I->RLAmt == VRI.RLAmt && I->Repl32 == VRI.Repl32)
- I = BitGroups.erase(I);
- else
- ++I;
- }
+ eraseMatchingBitGroups([VRI](const BitGroup &BG) {
+ return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt &&
+ BG.Repl32 == VRI.Repl32;
+ });
}
// Because 64-bit rotates are more flexible than inserts, we might have a
@@ -1846,6 +1833,11 @@ class BitPermutationSelector {
return nullptr;
}
+ void eraseMatchingBitGroups(function_ref<bool(const BitGroup &)> F) {
+ BitGroups.erase(std::remove_if(BitGroups.begin(), BitGroups.end(), F),
+ BitGroups.end());
+ }
+
SmallVector<ValueBit, 64> Bits;
bool HasZeros;
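
The new eraseMatchingBitGroups helper above replaces three copies of a manual
erase loop with the erase-remove idiom: std::remove_if compacts the kept
elements to the front and returns the new logical end, and erase() then drops
the tail, for O(n) total work versus repeated mid-vector erases. A standalone
illustration (assumed example, not from the patch):

    #include <algorithm>
    #include <vector>

    static void eraseIf(std::vector<int> &V, int Bad) {
      V.erase(std::remove_if(V.begin(), V.end(),
                             [Bad](int X) { return X == Bad; }),
              V.end());
    }

    // After eraseIf(V, 1) on V = {1, 2, 1, 3}, V holds {2, 3}.
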
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 2600ee5db179..1cdfb4178544 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3765,7 +3765,7 @@ struct TailCallArgumentInfo {
TailCallArgumentInfo() : FrameIdx(0) {}
};
-}
+} // namespace
/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static void
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index 7fd3f9c3de3d..c33d60565b79 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -353,7 +353,7 @@ namespace llvm {
/// the last operand.
TOC_ENTRY
};
- }
+ } // namespace PPCISD
/// Define some predicates that are used for node matching.
namespace PPC {
@@ -405,7 +405,7 @@ namespace llvm {
/// If this is a qvaligni shuffle mask, return the shift
/// amount, otherwise return -1.
int isQVALIGNIShuffleMask(SDNode *N);
- }
+ } // namespace PPC
class PPCTargetLowering : public TargetLowering {
const PPCSubtarget &Subtarget;
@@ -871,6 +871,6 @@ namespace llvm {
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
CCState &State);
-}
+} // namespace llvm
#endif // LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
diff --git a/lib/Target/PowerPC/PPCInstrAltivec.td b/lib/Target/PowerPC/PPCInstrAltivec.td
index e27bf7f5c0e0..9ff604bbee9d 100644
--- a/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -1142,7 +1142,9 @@ def:Pat<(vpkudum_unary_shuffle v16i8:$vA, undef),
def:Pat<(vpkudum_swapped_shuffle v16i8:$vA, v16i8:$vB),
(VPKUDUM $vB, $vA)>;
-
+def VGBBD : VX2_Int_Ty2<1292, "vgbbd", int_ppc_altivec_vgbbd, v16i8, v16i8>;
+def VBPERMQ : VX1_Int_Ty2<1356, "vbpermq", int_ppc_altivec_vbpermq,
+ v2i64, v16i8>;
} // end HasP8Altivec
// Crypto instructions (from builtins)
diff --git a/lib/Target/PowerPC/PPCInstrBuilder.h b/lib/Target/PowerPC/PPCInstrBuilder.h
index cf71b1c59869..ec94fa5580ff 100644
--- a/lib/Target/PowerPC/PPCInstrBuilder.h
+++ b/lib/Target/PowerPC/PPCInstrBuilder.h
@@ -38,6 +38,6 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0,
return MIB.addFrameIndex(FI).addImm(Offset);
}
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index b4bb50c80937..d3bb7a63c622 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -548,7 +548,7 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
unsigned
PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
@@ -593,7 +593,7 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
// Select analysis.
bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg,
int &CondCycles, int &TrueCycles, int &FalseCycles) const {
if (!Subtarget.hasISEL())
@@ -634,8 +634,7 @@ bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc dl,
- unsigned DestReg,
- const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned DestReg, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const {
assert(Cond.size() == 2 &&
"PPC branch conditions have two components!");
@@ -1213,9 +1212,8 @@ bool PPCInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
return !isPredicated(MI);
}
-bool PPCInstrInfo::PredicateInstruction(
- MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const {
+bool PPCInstrInfo::PredicateInstruction(MachineInstr *MI,
+ ArrayRef<MachineOperand> Pred) const {
unsigned OpC = MI->getOpcode();
if (OpC == PPC::BLR || OpC == PPC::BLR8) {
if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
@@ -1306,9 +1304,8 @@ bool PPCInstrInfo::PredicateInstruction(
return false;
}
-bool PPCInstrInfo::SubsumesPredicate(
- const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const {
+bool PPCInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+ ArrayRef<MachineOperand> Pred2) const {
assert(Pred1.size() == 2 && "Invalid PPC first predicate");
assert(Pred2.size() == 2 && "Invalid PPC second predicate");
diff --git a/lib/Target/PowerPC/PPCInstrInfo.h b/lib/Target/PowerPC/PPCInstrInfo.h
index 7fd076a7d1cd..39bf4547733c 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/lib/Target/PowerPC/PPCInstrInfo.h
@@ -106,7 +106,7 @@ public:
UseNode, UseIdx);
}
- bool hasLowDefLatency(const InstrItineraryData *ItinData,
+ bool hasLowDefLatency(const TargetSchedModel &SchedModel,
const MachineInstr *DefMI,
unsigned DefIdx) const override {
// Machine LICM should hoist all instructions in low-register-pressure
@@ -141,18 +141,14 @@ public:
bool AllowModify) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
// Select analysis.
- bool canInsertSelect(const MachineBasicBlock&,
- const SmallVectorImpl<MachineOperand> &Cond,
- unsigned, unsigned, int&, int&, int&) const override;
- void insertSelect(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DstReg,
- const SmallVectorImpl<MachineOperand> &Cond,
+ bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
+ unsigned, unsigned, int &, int &, int &) const override;
+ void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ DebugLoc DL, unsigned DstReg, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const override;
void copyPhysReg(MachineBasicBlock &MBB,
@@ -211,10 +207,10 @@ public:
bool isUnpredicatedTerminator(const MachineInstr *MI) const override;
bool PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const override;
+ ArrayRef<MachineOperand> Pred) const override;
- bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const override;
+ bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+ ArrayRef<MachineOperand> Pred2) const override;
bool DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const override;
@@ -241,6 +237,6 @@ public:
void getNoopForMachoTarget(MCInst &NopInst) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td
index c5a044ce85fd..b50124db1ea1 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/lib/Target/PowerPC/PPCInstrInfo.td
@@ -2225,7 +2225,7 @@ def MTSPR : XFXForm_1<31, 467, (outs), (ins i32imm:$SPR, gprc:$RT),
"mtspr $SPR, $RT", IIC_SprMTSPR>;
def MFTB : XFXForm_1<31, 371, (outs gprc:$RT), (ins i32imm:$SPR),
- "mftb $RT, $SPR", IIC_SprMFTB>, Deprecated<DeprecatedMFTB>;
+ "mftb $RT, $SPR", IIC_SprMFTB>;
// A pseudo-instruction used to implement the read of the 64-bit cycle counter
// on a 32-bit target.
diff --git a/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp b/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
index b4e1c099f190..e783b5e65333 100644
--- a/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
+++ b/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp
@@ -88,7 +88,7 @@ namespace {
const TargetTransformInfo *TTI;
const DataLayout *DL;
};
-}
+} // namespace
char PPCLoopDataPrefetch::ID = 0;
INITIALIZE_PASS_BEGIN(PPCLoopDataPrefetch, "ppc-loop-data-prefetch",
diff --git a/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp b/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
index b6e7799402e1..1891b6315c51 100644
--- a/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
+++ b/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp
@@ -87,7 +87,7 @@ namespace {
LoopInfo *LI;
ScalarEvolution *SE;
};
-}
+} // namespace
char PPCLoopPreIncPrep::ID = 0;
static const char *name = "Prepare loop for pre-inc. addressing modes";
@@ -113,7 +113,7 @@ namespace {
protected:
ScalarEvolution *SE;
};
-}
+} // namespace
static bool IsPtrInBounds(Value *BasePtr) {
Value *StrippedBasePtr = BasePtr;
diff --git a/lib/Target/PowerPC/PPCMCInstLower.cpp b/lib/Target/PowerPC/PPCMCInstLower.cpp
index 05cb6e11db67..c44d5d70f8dc 100644
--- a/lib/Target/PowerPC/PPCMCInstLower.cpp
+++ b/lib/Target/PowerPC/PPCMCInstLower.cpp
@@ -40,7 +40,7 @@ static MCSymbol *GetSymbolFromOperand(const MachineOperand &MO, AsmPrinter &AP){
Mangler *Mang = AP.Mang;
const DataLayout *DL = TM.getDataLayout();
MCContext &Ctx = AP.OutContext;
- bool isDarwin = Triple(TM.getTargetTriple()).isOSDarwin();
+ bool isDarwin = TM.getTargetTriple().isOSDarwin();
SmallString<128> Name;
StringRef Suffix;
diff --git a/lib/Target/PowerPC/PPCSelectionDAGInfo.h b/lib/Target/PowerPC/PPCSelectionDAGInfo.h
index 2c1378d5670d..d2eaeb42dbc4 100644
--- a/lib/Target/PowerPC/PPCSelectionDAGInfo.h
+++ b/lib/Target/PowerPC/PPCSelectionDAGInfo.h
@@ -26,6 +26,6 @@ public:
~PPCSelectionDAGInfo();
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/PowerPC/PPCSubtarget.cpp b/lib/Target/PowerPC/PPCSubtarget.cpp
index f313b0a6f178..cf603fe17723 100644
--- a/lib/Target/PowerPC/PPCSubtarget.cpp
+++ b/lib/Target/PowerPC/PPCSubtarget.cpp
@@ -47,7 +47,7 @@ PPCSubtarget &PPCSubtarget::initializeSubtargetDependencies(StringRef CPU,
return *this;
}
-PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU,
+PPCSubtarget::PPCSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const PPCTargetMachine &TM)
: PPCGenSubtargetInfo(TT, CPU, FS), TargetTriple(TT),
IsPPC64(TargetTriple.getArch() == Triple::ppc64 ||
@@ -91,7 +91,7 @@ void PPCSubtarget::initializeEnvironment() {
IsPPC4xx = false;
IsPPC6xx = false;
IsE500 = false;
- DeprecatedMFTB = false;
+ FeatureMFTB = false;
DeprecatedDST = false;
HasLazyResolverStubs = false;
HasICBT = false;
@@ -175,7 +175,7 @@ bool PPCSubtarget::enableMachineScheduler() const {
}
// This overrides the PostRAScheduler bit in the SchedModel for each CPU.
-bool PPCSubtarget::enablePostMachineScheduler() const { return true; }
+bool PPCSubtarget::enablePostRAScheduler() const { return true; }
PPCGenSubtargetInfo::AntiDepBreakMode PPCSubtarget::getAntiDepBreakMode() const {
return TargetSubtargetInfo::ANTIDEP_ALL;
diff --git a/lib/Target/PowerPC/PPCSubtarget.h b/lib/Target/PowerPC/PPCSubtarget.h
index 8d955088634a..ea17e1c189b8 100644
--- a/lib/Target/PowerPC/PPCSubtarget.h
+++ b/lib/Target/PowerPC/PPCSubtarget.h
@@ -58,7 +58,7 @@ namespace PPC {
DIR_PWR8,
DIR_64
};
-}
+} // namespace PPC
class GlobalValue;
class TargetMachine;
@@ -110,7 +110,7 @@ protected:
bool IsE500;
bool IsPPC4xx;
bool IsPPC6xx;
- bool DeprecatedMFTB;
+ bool FeatureMFTB;
bool DeprecatedDST;
bool HasLazyResolverStubs;
bool IsLittleEndian;
@@ -135,8 +135,8 @@ public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
- PPCSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, const PPCTargetMachine &TM);
+ PPCSubtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
+ const PPCTargetMachine &TM);
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
@@ -237,7 +237,7 @@ public:
bool isPPC4xx() const { return IsPPC4xx; }
bool isPPC6xx() const { return IsPPC6xx; }
bool isE500() const { return IsE500; }
- bool isDeprecatedMFTB() const { return DeprecatedMFTB; }
+ bool isFeatureMFTB() const { return FeatureMFTB; }
bool isDeprecatedDST() const { return DeprecatedDST; }
bool hasICBT() const { return HasICBT; }
bool hasInvariantFunctionDescriptors() const {
@@ -274,7 +274,7 @@ public:
// Scheduling customization.
bool enableMachineScheduler() const override;
// This overrides the PostRAScheduler bit in the SchedModel for each CPU.
- bool enablePostMachineScheduler() const override;
+ bool enablePostRAScheduler() const override;
AntiDepBreakMode getAntiDepBreakMode() const override;
void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const override;
@@ -286,6 +286,6 @@ public:
bool enableSubRegLiveness() const override;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/PowerPC/PPCTLSDynamicCall.cpp b/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
index 2dc0d825c80d..7a9db0fabb07 100644
--- a/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
+++ b/lib/Target/PowerPC/PPCTLSDynamicCall.cpp
@@ -156,7 +156,7 @@ public:
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
INITIALIZE_PASS_BEGIN(PPCTLSDynamicCall, DEBUG_TYPE,
"PowerPC TLS Dynamic Call Fixup", false, false)
diff --git a/lib/Target/PowerPC/PPCTOCRegDeps.cpp b/lib/Target/PowerPC/PPCTOCRegDeps.cpp
index bf165c9edc6e..61b963fe6da5 100644
--- a/lib/Target/PowerPC/PPCTOCRegDeps.cpp
+++ b/lib/Target/PowerPC/PPCTOCRegDeps.cpp
@@ -145,7 +145,7 @@ public:
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
INITIALIZE_PASS(PPCTOCRegDeps, DEBUG_TYPE,
"PowerPC TOC Register Dependencies", false, false)
diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp
index 50d4395dfbe8..074bc870751a 100644
--- a/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -98,13 +98,12 @@ static std::string getDataLayoutString(const Triple &T) {
return Ret;
}
-static std::string computeFSAdditions(StringRef FS, CodeGenOpt::Level OL, StringRef TT) {
+static std::string computeFSAdditions(StringRef FS, CodeGenOpt::Level OL,
+ const Triple &TT) {
std::string FullFS = FS;
- Triple TargetTriple(TT);
// Make sure 64-bit features are available when CPUname is generic
- if (TargetTriple.getArch() == Triple::ppc64 ||
- TargetTriple.getArch() == Triple::ppc64le) {
+ if (TT.getArch() == Triple::ppc64 || TT.getArch() == Triple::ppc64le) {
if (!FullFS.empty())
FullFS = "+64bit," + FullFS;
else
@@ -165,14 +164,15 @@ static PPCTargetMachine::PPCABI computeTargetABI(const Triple &TT,
// with what are (currently) non-function specific overrides as it goes into the
// LLVMTargetMachine constructor and then using the stored value in the
// Subtarget constructor below it.
-PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT, StringRef CPU,
- StringRef FS, const TargetOptions &Options,
+PPCTargetMachine::PPCTargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, getDataLayoutString(Triple(TT)), TT, CPU,
+ : LLVMTargetMachine(T, getDataLayoutString(TT), TT, CPU,
computeFSAdditions(FS, OL, TT), Options, RM, CM, OL),
- TLOF(createTLOF(Triple(getTargetTriple()))),
- TargetABI(computeTargetABI(Triple(TT), Options)) {
+ TLOF(createTLOF(getTargetTriple())),
+ TargetABI(computeTargetABI(TT, Options)) {
initAsmInfo();
}
@@ -180,23 +180,21 @@ PPCTargetMachine::~PPCTargetMachine() {}
void PPC32TargetMachine::anchor() { }
-PPC32TargetMachine::PPC32TargetMachine(const Target &T, StringRef TT,
+PPC32TargetMachine::PPC32TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : PPCTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
-}
+ : PPCTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
void PPC64TargetMachine::anchor() { }
-PPC64TargetMachine::PPC64TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
+PPC64TargetMachine::PPC64TargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : PPCTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
-}
+ : PPCTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}
const PPCSubtarget *
PPCTargetMachine::getSubtargetImpl(const Function &F) const {
@@ -264,9 +262,8 @@ void PPCPassConfig::addIRPasses() {
// For the BG/Q (or if explicitly requested), add explicit data prefetch
// intrinsics.
- bool UsePrefetching =
- Triple(TM->getTargetTriple()).getVendor() == Triple::BGQ &&
- getOptLevel() != CodeGenOpt::None;
+ bool UsePrefetching = TM->getTargetTriple().getVendor() == Triple::BGQ &&
+ getOptLevel() != CodeGenOpt::None;
if (EnablePrefetch.getNumOccurrences() > 0)
UsePrefetching = EnablePrefetch;
if (UsePrefetching)
@@ -320,7 +317,7 @@ void PPCPassConfig::addMachineSSAOptimization() {
TargetPassConfig::addMachineSSAOptimization();
// For little endian, remove where possible the vector swap instructions
// introduced at code generation to normalize vector element order.
- if (Triple(TM->getTargetTriple()).getArch() == Triple::ppc64le &&
+ if (TM->getTargetTriple().getArch() == Triple::ppc64le &&
!DisableVSXSwapRemoval)
addPass(createPPCVSXSwapRemovalPass());
}
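The PPCTargetMachine hunks show the pattern this import applies across every backend: functions that took the triple as a string, re-parsing it with Triple(TT) at each use, now take a pre-parsed const Triple&. A minimal sketch of the computeFSAdditions change under that scheme, assuming only llvm/ADT/Triple.h and llvm/ADT/StringRef.h; the real function also folds in optimization-level-dependent features, omitted here:

  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/Triple.h"
  #include <string>

  // Parse-once style: every query below is a cheap enum compare instead
  // of a string re-parse.
  static std::string computeFSAdditionsSketch(llvm::StringRef FS,
                                              const llvm::Triple &TT) {
    std::string FullFS = FS.str();
    // Make sure 64-bit features are available when the CPU is generic.
    if (TT.getArch() == llvm::Triple::ppc64 ||
        TT.getArch() == llvm::Triple::ppc64le)
      FullFS = FullFS.empty() ? "+64bit" : "+64bit," + FullFS;
    return FullFS;
  }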
diff --git a/lib/Target/PowerPC/PPCTargetMachine.h b/lib/Target/PowerPC/PPCTargetMachine.h
index 7a4905889891..5c0f7e629a69 100644
--- a/lib/Target/PowerPC/PPCTargetMachine.h
+++ b/lib/Target/PowerPC/PPCTargetMachine.h
@@ -32,8 +32,8 @@ private:
mutable StringMap<std::unique_ptr<PPCSubtarget>> SubtargetMap;
public:
- PPCTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM,
+ PPCTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options, Reloc::Model RM,
CodeModel::Model CM, CodeGenOpt::Level OL);
~PPCTargetMachine() override;
@@ -50,7 +50,7 @@ public:
}
bool isELFv2ABI() const { return TargetABI == PPC_ABI_ELFv2; }
bool isPPC64() const {
- Triple TT(getTargetTriple());
+ const Triple &TT = getTargetTriple();
return (TT.getArch() == Triple::ppc64 || TT.getArch() == Triple::ppc64le);
};
};
@@ -60,8 +60,8 @@ public:
class PPC32TargetMachine : public PPCTargetMachine {
virtual void anchor();
public:
- PPC32TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
+ PPC32TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
@@ -71,8 +71,8 @@ public:
class PPC64TargetMachine : public PPCTargetMachine {
virtual void anchor();
public:
- PPC64TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
+ PPC64TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
diff --git a/lib/Target/PowerPC/PPCTargetStreamer.h b/lib/Target/PowerPC/PPCTargetStreamer.h
index dbe7617d3542..a5c4c23c7901 100644
--- a/lib/Target/PowerPC/PPCTargetStreamer.h
+++ b/lib/Target/PowerPC/PPCTargetStreamer.h
@@ -22,6 +22,6 @@ public:
virtual void emitAbiVersion(int AbiVersion) = 0;
virtual void emitLocalEntry(MCSymbolELF *S, const MCExpr *LocalOffset) = 0;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/PowerPC/PPCVSXCopy.cpp b/lib/Target/PowerPC/PPCVSXCopy.cpp
index 5e3ae2a4471b..537db656fd60 100644
--- a/lib/Target/PowerPC/PPCVSXCopy.cpp
+++ b/lib/Target/PowerPC/PPCVSXCopy.cpp
@@ -165,7 +165,7 @@ public:
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
INITIALIZE_PASS(PPCVSXCopy, DEBUG_TYPE,
"PowerPC VSX Copy Legalization", false, false)
diff --git a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index f352fa647ace..a029ddf0bc08 100644
--- a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -317,7 +317,7 @@ public:
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
INITIALIZE_PASS_BEGIN(PPCVSXFMAMutate, DEBUG_TYPE,
"PowerPC VSX FMA Mutation", false, false)
diff --git a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
index e238669145ad..939293a5638e 100644
--- a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
+++ b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
@@ -809,7 +809,7 @@ void PPCVSXSwapRemoval::dumpSwapVector() {
DEBUG(dbgs() << "\n");
}
-} // end default namespace
+} // namespace
INITIALIZE_PASS_BEGIN(PPCVSXSwapRemoval, DEBUG_TYPE,
"PowerPC VSX Swap Removal", false, false)
diff --git a/lib/Target/R600/AsmParser/CMakeLists.txt b/lib/Target/R600/AsmParser/CMakeLists.txt
deleted file mode 100644
index 1b42af73740e..000000000000
--- a/lib/Target/R600/AsmParser/CMakeLists.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMR600AsmParser
- AMDGPUAsmParser.cpp
- )
diff --git a/lib/Target/R600/CIInstructions.td b/lib/Target/R600/CIInstructions.td
deleted file mode 100644
index 560aa787fe80..000000000000
--- a/lib/Target/R600/CIInstructions.td
+++ /dev/null
@@ -1,42 +0,0 @@
-//===-- CIInstructions.td - CI Instruction Definitions --------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Instruction definitions for CI and newer.
-//===----------------------------------------------------------------------===//
-
-
-def isCIVI : Predicate <
- "Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS || "
- "Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS"
->, AssemblerPredicate<"FeatureCIInsts">;
-
-//===----------------------------------------------------------------------===//
-// VOP1 Instructions
-//===----------------------------------------------------------------------===//
-
-let SubtargetPredicate = isCIVI in {
-
-defm V_TRUNC_F64 : VOP1Inst <vop1<0x17>, "v_trunc_f64",
- VOP_F64_F64, ftrunc
->;
-defm V_CEIL_F64 : VOP1Inst <vop1<0x18>, "v_ceil_f64",
- VOP_F64_F64, fceil
->;
-defm V_FLOOR_F64 : VOP1Inst <vop1<0x1A>, "v_floor_f64",
- VOP_F64_F64, ffloor
->;
-defm V_RNDNE_F64 : VOP1Inst <vop1<0x19>, "v_rndne_f64",
- VOP_F64_F64, frint
->;
-defm V_LOG_LEGACY_F32 : VOP1Inst <vop1<0x45, 0x4c>, "v_log_legacy_f32",
- VOP_F32_F32
->;
-defm V_EXP_LEGACY_F32 : VOP1Inst <vop1<0x46, 0x4b>, "v_exp_legacy_f32",
- VOP_F32_F32
->;
-} // End SubtargetPredicate = isCIVI
diff --git a/lib/Target/R600/InstPrinter/CMakeLists.txt b/lib/Target/R600/InstPrinter/CMakeLists.txt
deleted file mode 100644
index dcd87037fabb..000000000000
--- a/lib/Target/R600/InstPrinter/CMakeLists.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMR600AsmPrinter
- AMDGPUInstPrinter.cpp
- )
diff --git a/lib/Target/R600/TargetInfo/CMakeLists.txt b/lib/Target/R600/TargetInfo/CMakeLists.txt
deleted file mode 100644
index c3bd26c7a893..000000000000
--- a/lib/Target/R600/TargetInfo/CMakeLists.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMR600Info
- AMDGPUTargetInfo.cpp
- )
diff --git a/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
index 6b3b51afb4bd..4a33f7fc3467 100644
--- a/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
+++ b/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
@@ -76,7 +76,9 @@ class SparcAsmParser : public MCTargetAsmParser {
bool matchSparcAsmModifiers(const MCExpr *&EVal, SMLoc &EndLoc);
bool parseDirectiveWord(unsigned Size, SMLoc L);
- bool is64Bit() const { return STI.getTargetTriple().startswith("sparcv9"); }
+ bool is64Bit() const {
+ return STI.getTargetTriple().getArchName().startswith("sparcv9");
+ }
void expandSET(MCInst &Inst, SMLoc IDLoc,
SmallVectorImpl<MCInst> &Instructions);
@@ -945,6 +947,8 @@ bool SparcAsmParser::matchRegisterName(const AsmToken &Tok,
return false;
}
+// Determine if an expression contains a reference to the symbol
+// "_GLOBAL_OFFSET_TABLE_".
static bool hasGOTReference(const MCExpr *Expr) {
switch (Expr->getKind()) {
case MCExpr::Target:
@@ -996,6 +1000,13 @@ bool SparcAsmParser::matchSparcAsmModifiers(const MCExpr *&EVal,
bool isPIC = getContext().getObjectFileInfo()->getRelocM() == Reloc::PIC_;
+ // Ugly: if a sparc assembly expression says "%hi(...)" but the
+ // expression within contains _GLOBAL_OFFSET_TABLE_, it REALLY means
+ // %pc22. Same with %lo -> %pc10. Worse, if it doesn't contain that,
+ // the meaning depends on whether the assembler was invoked with
+ // -KPIC or not: if so, it really means %got22/%got10; if not, it
+ // actually means what it said! Sigh, historical mistakes...
+
switch(VK) {
default: break;
case SparcMCExpr::VK_Sparc_LO:
diff --git a/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp b/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
index 3e56b9e9b883..59f011aefe66 100644
--- a/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
+++ b/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
@@ -41,7 +41,7 @@ public:
raw_ostream &VStream,
raw_ostream &CStream) const override;
};
-}
+} // namespace
namespace llvm {
extern Target TheSparcTarget, TheSparcV9Target, TheSparcelTarget;
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp b/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
index 9388527004f5..d1d7aaa07eab 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
@@ -297,10 +297,8 @@ namespace {
} // end anonymous namespace
-
MCAsmBackend *llvm::createSparcAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT,
- StringRef CPU) {
- return new ELFSparcAsmBackend(T, Triple(TT).getOS());
+ const Triple &TT, StringRef CPU) {
+ return new ELFSparcAsmBackend(T, TT.getOS());
}
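createSparcAsmBackend is the same migration in miniature: the StringRef overload constructed a temporary Triple just to ask one question. A self-contained before/after sketch (function names are illustrative):

  #include "llvm/ADT/StringRef.h"
  #include "llvm/ADT/Triple.h"

  // Old style: each call re-parses the triple string.
  static llvm::Triple::OSType osFromStringOld(llvm::StringRef TT) {
    return llvm::Triple(TT).getOS();
  }

  // New style: parse once at the TargetMachine boundary, then pass the
  // parsed form by const reference everywhere below it.
  static llvm::Triple::OSType osFromTripleNew(const llvm::Triple &TT) {
    return TT.getOS();
  }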
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp b/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
index 4f07ae219205..800a5f254b8f 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
@@ -31,8 +31,12 @@ namespace {
protected:
unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
bool IsPCRel) const override;
+
+ bool needsRelocateWithSymbol(const MCSymbol &Sym,
+ unsigned Type) const override;
+
};
-}
+} // namespace
unsigned SparcELFObjectWriter::GetRelocType(const MCValue &Target,
const MCFixup &Fixup,
@@ -105,6 +109,27 @@ unsigned SparcELFObjectWriter::GetRelocType(const MCValue &Target,
return ELF::R_SPARC_NONE;
}
+bool SparcELFObjectWriter::needsRelocateWithSymbol(const MCSymbol &Sym,
+ unsigned Type) const {
+ switch (Type) {
+ default:
+ return false;
+
+ // All relocations that use a GOT need a symbol, not an offset, as
+ // the offset of the symbol within the section is irrelevant to
+ // where the GOT entry is. Don't need to list all the TLS entries,
+ // as they're all marked as requiring a symbol anyways.
+ case ELF::R_SPARC_GOT10:
+ case ELF::R_SPARC_GOT13:
+ case ELF::R_SPARC_GOT22:
+ case ELF::R_SPARC_GOTDATA_HIX22:
+ case ELF::R_SPARC_GOTDATA_LOX10:
+ case ELF::R_SPARC_GOTDATA_OP_HIX22:
+ case ELF::R_SPARC_GOTDATA_OP_LOX10:
+ return true;
+ }
+}
+
MCObjectWriter *llvm::createSparcELFObjectWriter(raw_pwrite_stream &OS,
bool Is64Bit,
bool IsLittleEndian,
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h b/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h
index 8d79396d936e..34c58da10d5d 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h
+++ b/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h
@@ -91,7 +91,7 @@ namespace llvm {
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
};
- }
-}
+ } // namespace Sparc
+} // namespace llvm
#endif
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
index d34c87977168..91d2eeef0cc0 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
@@ -63,12 +63,11 @@ static MCRegisterInfo *createSparcMCRegisterInfo(StringRef TT) {
return X;
}
-static MCSubtargetInfo *createSparcMCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
+static MCSubtargetInfo *
+createSparcMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
MCSubtargetInfo *X = new MCSubtargetInfo();
- Triple TheTriple(TT);
if (CPU.empty())
- CPU = (TheTriple.getArch() == Triple::sparcv9) ? "v9" : "v8";
+ CPU = (TT.getArch() == Triple::sparcv9) ? "v9" : "v8";
InitSparcMCSubtargetInfo(X, TT, CPU, FS);
return X;
}
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
index 28e211948c37..8f62de4a4fd2 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
+++ b/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
@@ -25,6 +25,7 @@ class MCObjectWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
class Target;
+class Triple;
class StringRef;
class raw_pwrite_stream;
class raw_ostream;
@@ -37,10 +38,10 @@ MCCodeEmitter *createSparcMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
MCContext &Ctx);
MCAsmBackend *createSparcAsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+ const Triple &TT, StringRef CPU);
MCObjectWriter *createSparcELFObjectWriter(raw_pwrite_stream &OS, bool Is64Bit,
bool IsLittleEndian, uint8_t OSABI);
-} // End llvm namespace
+} // namespace llvm
// Defines symbolic names for Sparc registers. This defines a mapping from
// register name to register number.
diff --git a/lib/Target/Sparc/Sparc.h b/lib/Target/Sparc/Sparc.h
index 96378d522dc0..133af8694139 100644
--- a/lib/Target/Sparc/Sparc.h
+++ b/lib/Target/Sparc/Sparc.h
@@ -33,7 +33,7 @@ namespace llvm {
void LowerSparcMachineInstrToMCInst(const MachineInstr *MI,
MCInst &OutMI,
AsmPrinter &AP);
-} // end namespace llvm;
+} // namespace llvm
namespace llvm {
// Enums corresponding to Sparc condition codes, both icc's and fcc's. These
@@ -74,7 +74,7 @@ namespace llvm {
FCC_ULE = 14+16, // Unordered or Less or Equal
FCC_O = 15+16 // Ordered
};
- }
+ } // namespace SPCC
inline static const char *SPARCCondCodeToString(SPCC::CondCodes CC) {
switch (CC) {
diff --git a/lib/Target/Sparc/SparcFrameLowering.h b/lib/Target/Sparc/SparcFrameLowering.h
index bb3b78861cbd..3d73bbd0d90c 100644
--- a/lib/Target/Sparc/SparcFrameLowering.h
+++ b/lib/Target/Sparc/SparcFrameLowering.h
@@ -55,6 +55,6 @@ private:
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/Sparc/SparcISelLowering.h b/lib/Target/Sparc/SparcISelLowering.h
index b6bc3d255713..a4b9c79c3264 100644
--- a/lib/Target/Sparc/SparcISelLowering.h
+++ b/lib/Target/Sparc/SparcISelLowering.h
@@ -49,7 +49,7 @@ namespace llvm {
TLS_LD,
TLS_CALL
};
- }
+ } // namespace SPISD
class SparcTargetLowering : public TargetLowering {
const SparcSubtarget *Subtarget;
diff --git a/lib/Target/Sparc/SparcInstrInfo.cpp b/lib/Target/Sparc/SparcInstrInfo.cpp
index 4b70f1619b13..f87cee43e319 100644
--- a/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -229,7 +229,7 @@ bool SparcInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
unsigned
SparcInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
diff --git a/lib/Target/Sparc/SparcInstrInfo.h b/lib/Target/Sparc/SparcInstrInfo.h
index 6e0841898073..b59dd896019c 100644
--- a/lib/Target/Sparc/SparcInstrInfo.h
+++ b/lib/Target/Sparc/SparcInstrInfo.h
@@ -73,8 +73,7 @@ public:
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
void copyPhysReg(MachineBasicBlock &MBB,
@@ -97,6 +96,6 @@ public:
unsigned getGlobalBaseReg(MachineFunction *MF) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Sparc/SparcMachineFunctionInfo.h b/lib/Target/Sparc/SparcMachineFunctionInfo.h
index 104744279d9d..0471443f5961 100644
--- a/lib/Target/Sparc/SparcMachineFunctionInfo.h
+++ b/lib/Target/Sparc/SparcMachineFunctionInfo.h
@@ -51,6 +51,6 @@ namespace llvm {
void setLeafProc(bool rhs) { IsLeafProc = rhs; }
bool isLeafProc() const { return IsLeafProc; }
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Sparc/SparcSelectionDAGInfo.h b/lib/Target/Sparc/SparcSelectionDAGInfo.h
index 6818291b30b4..2ceae82c8cdb 100644
--- a/lib/Target/Sparc/SparcSelectionDAGInfo.h
+++ b/lib/Target/Sparc/SparcSelectionDAGInfo.h
@@ -26,6 +26,6 @@ public:
~SparcSelectionDAGInfo() override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/Sparc/SparcSubtarget.cpp b/lib/Target/Sparc/SparcSubtarget.cpp
index ce1105f2d72f..479b25d2723f 100644
--- a/lib/Target/Sparc/SparcSubtarget.cpp
+++ b/lib/Target/Sparc/SparcSubtarget.cpp
@@ -49,7 +49,7 @@ SparcSubtarget &SparcSubtarget::initializeSubtargetDependencies(StringRef CPU,
return *this;
}
-SparcSubtarget::SparcSubtarget(const std::string &TT, const std::string &CPU,
+SparcSubtarget::SparcSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, TargetMachine &TM,
bool is64Bit)
: SparcGenSubtargetInfo(TT, CPU, FS), Is64Bit(is64Bit),
diff --git a/lib/Target/Sparc/SparcSubtarget.h b/lib/Target/Sparc/SparcSubtarget.h
index e6cf460b85c6..983b1193975d 100644
--- a/lib/Target/Sparc/SparcSubtarget.h
+++ b/lib/Target/Sparc/SparcSubtarget.h
@@ -43,7 +43,7 @@ class SparcSubtarget : public SparcGenSubtargetInfo {
SparcFrameLowering FrameLowering;
public:
- SparcSubtarget(const std::string &TT, const std::string &CPU,
+ SparcSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, TargetMachine &TM, bool is64bit);
const SparcInstrInfo *getInstrInfo() const override { return &InstrInfo; }
diff --git a/lib/Target/Sparc/SparcTargetMachine.cpp b/lib/Target/Sparc/SparcTargetMachine.cpp
index d43cd9e31271..725d7f047c47 100644
--- a/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -54,13 +54,13 @@ static std::string computeDataLayout(const Triple &T, bool is64Bit) {
/// SparcTargetMachine ctor - Create an ILP32 architecture model
///
-SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT,
+SparcTargetMachine::SparcTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL, bool is64bit)
- : LLVMTargetMachine(T, computeDataLayout(Triple(TT), is64bit), TT, CPU, FS,
- Options, RM, CM, OL),
+ : LLVMTargetMachine(T, computeDataLayout(TT, is64bit), TT, CPU, FS, Options,
+ RM, CM, OL),
TLOF(make_unique<SparcELFTargetObjectFile>()),
Subtarget(TT, CPU, FS, *this, is64bit) {
initAsmInfo();
@@ -106,19 +106,16 @@ void SparcPassConfig::addPreEmitPass(){
void SparcV8TargetMachine::anchor() { }
-SparcV8TargetMachine::SparcV8TargetMachine(const Target &T,
- StringRef TT, StringRef CPU,
- StringRef FS,
+SparcV8TargetMachine::SparcV8TargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM,
- CodeModel::Model CM,
+ Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {
-}
+ : SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
void SparcV9TargetMachine::anchor() { }
-SparcV9TargetMachine::SparcV9TargetMachine(const Target &T, StringRef TT,
+SparcV9TargetMachine::SparcV9TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
@@ -127,7 +124,7 @@ SparcV9TargetMachine::SparcV9TargetMachine(const Target &T, StringRef TT,
void SparcelTargetMachine::anchor() {}
-SparcelTargetMachine::SparcelTargetMachine(const Target &T, StringRef TT,
+SparcelTargetMachine::SparcelTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
diff --git a/lib/Target/Sparc/SparcTargetMachine.h b/lib/Target/Sparc/SparcTargetMachine.h
index fd05b8c711be..903c2d15629f 100644
--- a/lib/Target/Sparc/SparcTargetMachine.h
+++ b/lib/Target/Sparc/SparcTargetMachine.h
@@ -24,10 +24,10 @@ class SparcTargetMachine : public LLVMTargetMachine {
std::unique_ptr<TargetLoweringObjectFile> TLOF;
SparcSubtarget Subtarget;
public:
- SparcTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
- Reloc::Model RM, CodeModel::Model CM,
- CodeGenOpt::Level OL, bool is64bit);
+ SparcTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL,
+ bool is64bit);
~SparcTargetMachine() override;
const SparcSubtarget *getSubtargetImpl(const Function &) const override {
@@ -46,9 +46,8 @@ public:
class SparcV8TargetMachine : public SparcTargetMachine {
virtual void anchor();
public:
- SparcV8TargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS,
- const TargetOptions &Options,
+ SparcV8TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
};
@@ -58,7 +57,7 @@ public:
class SparcV9TargetMachine : public SparcTargetMachine {
virtual void anchor();
public:
- SparcV9TargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ SparcV9TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
@@ -68,7 +67,7 @@ class SparcelTargetMachine : public SparcTargetMachine {
virtual void anchor();
public:
- SparcelTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ SparcelTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
index 0e8a680d4dd4..57eebe19c044 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
@@ -111,7 +111,7 @@ bool SystemZMCAsmBackend::writeNopData(uint64_t Count,
MCAsmBackend *llvm::createSystemZMCAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU) {
- uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
+ const Triple &TT, StringRef CPU) {
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
return new SystemZMCAsmBackend(OSABI);
}
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
index 92681cf6e44b..81882106fc46 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
@@ -154,9 +154,8 @@ static MCRegisterInfo *createSystemZMCRegisterInfo(StringRef TT) {
return X;
}
-static MCSubtargetInfo *createSystemZMCSubtargetInfo(StringRef TT,
- StringRef CPU,
- StringRef FS) {
+static MCSubtargetInfo *
+createSystemZMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
MCSubtargetInfo *X = new MCSubtargetInfo();
InitSystemZMCSubtargetInfo(X, TT, CPU, FS);
return X;
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
index 36ea750ec8dc..0db48fe5a109 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
@@ -23,6 +23,7 @@ class MCRegisterInfo;
class MCSubtargetInfo;
class StringRef;
class Target;
+class Triple;
class raw_pwrite_stream;
class raw_ostream;
@@ -84,7 +85,7 @@ MCCodeEmitter *createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
MCAsmBackend *createSystemZMCAsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+ const Triple &TT, StringRef CPU);
MCObjectWriter *createSystemZObjectWriter(raw_pwrite_stream &OS, uint8_t OSABI);
} // end namespace llvm
diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 63992936813d..0eb3d6593fe6 100644
--- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -1113,8 +1113,8 @@ bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
if (V1 == V2 && End1 == End2)
return false;
- return !AA->alias(AliasAnalysis::Location(V1, End1, Load->getAAInfo()),
- AliasAnalysis::Location(V2, End2, Store->getAAInfo()));
+ return !AA->alias(MemoryLocation(V1, End1, Load->getAAInfo()),
+ MemoryLocation(V2, End2, Store->getAAInfo()));
}
bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
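This hunk reflects MemoryLocation being split out of AliasAnalysis into its own class (see lib/Analysis/MemoryLocation.cpp in the diffstat); the nested AliasAnalysis::Location spelling is retired in favor of llvm::MemoryLocation. A sketch of the updated query idiom from the hunk, assuming the LLVM 3.7-era AliasAnalysis interface:

  #include "llvm/Analysis/AliasAnalysis.h"
  #include "llvm/Analysis/MemoryLocation.h"
  #include <cstdint>

  // Build two MemoryLocations (pointer, size in bytes, optional AA tags)
  // and ask the AA for an AliasResult; NoAlias is zero, so '!' reads as
  // "provably disjoint", exactly as in canUseBlockOperation above.
  static bool provablyDisjoint(llvm::AliasAnalysis &AA,
                               const llvm::Value *P1, uint64_t Size1,
                               const llvm::Value *P2, uint64_t Size2) {
    return !AA.alias(llvm::MemoryLocation(P1, Size1),
                     llvm::MemoryLocation(P2, Size2));
  }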
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index 91e12c2d9d7e..75845796de79 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -3292,7 +3292,7 @@ struct Permute {
unsigned Operand;
unsigned char Bytes[SystemZ::VectorBytes];
};
-}
+} // namespace
static const Permute PermuteForms[] = {
// VMRHG
@@ -3574,7 +3574,7 @@ struct GeneralShuffle {
// The type of the shuffle result.
EVT VT;
};
-}
+} // namespace
// Add an extra undefined element to the shuffle.
void GeneralShuffle::addUndef() {
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 4346850e0ac5..5d4a34f7131c 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -362,7 +362,7 @@ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
// In this function we output 32-bit branches, which should always
// have enough range. They can be shortened and relaxed by later code
@@ -530,8 +530,7 @@ isProfitableToIfCvt(MachineBasicBlock &TMBB,
}
bool SystemZInstrInfo::
-PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const {
+PredicateInstruction(MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
assert(Pred.size() == 2 && "Invalid condition");
unsigned CCValid = Pred[0].getImm();
unsigned CCMask = Pred[1].getImm();
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.h b/lib/Target/SystemZ/SystemZInstrInfo.h
index e47f2ee9d0b6..31c9db209585 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -149,8 +149,7 @@ public:
bool AllowModify) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
unsigned &SrcReg2, int &Mask, int &Value) const override;
@@ -167,8 +166,7 @@ public:
unsigned NumCyclesF, unsigned ExtraPredCyclesF,
const BranchProbability &Probability) const override;
bool PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const
- override;
+ ArrayRef<MachineOperand> Pred) const override;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
DebugLoc DL, unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
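InsertBranch and PredicateInstruction now take ArrayRef<MachineOperand> rather than const SmallVectorImpl<MachineOperand>&, part of a tree-wide loosening of these hooks: an ArrayRef binds to a SmallVector, a std::vector, a C array, or a braced list alike, so callers are no longer tied to one container. A self-contained sketch:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/SmallVector.h"
  #include <cstddef>

  // Old style: callers had to materialize a SmallVector.
  static size_t countOld(const llvm::SmallVectorImpl<int> &Cond) {
    return Cond.size();
  }

  // New style: any contiguous sequence binds implicitly.
  static size_t countNew(llvm::ArrayRef<int> Cond) { return Cond.size(); }

  static size_t demo() {
    llvm::SmallVector<int, 2> SV;
    SV.push_back(1);
    int CArray[] = {2, 3};
    return countOld(SV) + countNew(SV) + countNew(CArray) +
           countNew({4}); // all of these bind to ArrayRef
  }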
diff --git a/lib/Target/SystemZ/SystemZSubtarget.cpp b/lib/Target/SystemZ/SystemZSubtarget.cpp
index 05aede3deb4f..eb5e5c0b9ff8 100644
--- a/lib/Target/SystemZ/SystemZSubtarget.cpp
+++ b/lib/Target/SystemZ/SystemZSubtarget.cpp
@@ -32,8 +32,7 @@ SystemZSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
return *this;
}
-SystemZSubtarget::SystemZSubtarget(const std::string &TT,
- const std::string &CPU,
+SystemZSubtarget::SystemZSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS,
const TargetMachine &TM)
: SystemZGenSubtargetInfo(TT, CPU, FS), HasDistinctOps(false),
@@ -41,9 +40,9 @@ SystemZSubtarget::SystemZSubtarget(const std::string &TT,
HasPopulationCount(false), HasFastSerialization(false),
HasInterlockedAccess1(false), HasMiscellaneousExtensions(false),
HasTransactionalExecution(false), HasProcessorAssist(false),
- HasVector(false),
- TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
- TLInfo(TM, *this), TSInfo(*TM.getDataLayout()), FrameLowering() {}
+ HasVector(false), TargetTriple(TT),
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
+ TSInfo(*TM.getDataLayout()), FrameLowering() {}
// Return true if GV binds locally under reloc model RM.
static bool bindsLocally(const GlobalValue *GV, Reloc::Model RM) {
diff --git a/lib/Target/SystemZ/SystemZSubtarget.h b/lib/Target/SystemZ/SystemZSubtarget.h
index 9a1f593f5265..f7eaf01cb77e 100644
--- a/lib/Target/SystemZ/SystemZSubtarget.h
+++ b/lib/Target/SystemZ/SystemZSubtarget.h
@@ -56,7 +56,7 @@ private:
SystemZSubtarget &initializeSubtargetDependencies(StringRef CPU,
StringRef FS);
public:
- SystemZSubtarget(const std::string &TT, const std::string &CPU,
+ SystemZSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM);
const TargetFrameLowering *getFrameLowering() const override {
diff --git a/lib/Target/SystemZ/SystemZTargetMachine.cpp b/lib/Target/SystemZ/SystemZTargetMachine.cpp
index a34cdaf8030d..00cbbd10a819 100644
--- a/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -43,9 +43,8 @@ static bool UsesVectorABI(StringRef CPU, StringRef FS) {
return VectorABI;
}
-static std::string computeDataLayout(StringRef TT, StringRef CPU,
+static std::string computeDataLayout(const Triple &TT, StringRef CPU,
StringRef FS) {
- const Triple Triple(TT);
bool VectorABI = UsesVectorABI(CPU, FS);
std::string Ret = "";
@@ -53,7 +52,7 @@ static std::string computeDataLayout(StringRef TT, StringRef CPU,
Ret += "E";
// Data mangling.
- Ret += DataLayout::getManglingComponent(Triple);
+ Ret += DataLayout::getManglingComponent(TT);
// Make sure that global data has at least 16 bits of alignment by
// default, so that we can refer to it using LARL. We don't have any
@@ -79,13 +78,13 @@ static std::string computeDataLayout(StringRef TT, StringRef CPU,
return Ret;
}
-SystemZTargetMachine::SystemZTargetMachine(const Target &T, StringRef TT,
+SystemZTargetMachine::SystemZTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, computeDataLayout(TT, CPU, FS),
- TT, CPU, FS, Options, RM, CM, OL),
+ : LLVMTargetMachine(T, computeDataLayout(TT, CPU, FS), TT, CPU, FS, Options,
+ RM, CM, OL),
TLOF(make_unique<TargetLoweringObjectFileELF>()),
Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
diff --git a/lib/Target/SystemZ/SystemZTargetMachine.h b/lib/Target/SystemZ/SystemZTargetMachine.h
index 5ded07c1efb2..0a81e1f9fdf9 100644
--- a/lib/Target/SystemZ/SystemZTargetMachine.h
+++ b/lib/Target/SystemZ/SystemZTargetMachine.h
@@ -27,7 +27,7 @@ class SystemZTargetMachine : public LLVMTargetMachine {
SystemZSubtarget Subtarget;
public:
- SystemZTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ SystemZTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
diff --git a/lib/Target/TargetLoweringObjectFile.cpp b/lib/Target/TargetLoweringObjectFile.cpp
index d498bb104ef8..19b5e2a0f978 100644
--- a/lib/Target/TargetLoweringObjectFile.cpp
+++ b/lib/Target/TargetLoweringObjectFile.cpp
@@ -44,8 +44,8 @@ void TargetLoweringObjectFile::Initialize(MCContext &ctx,
const TargetMachine &TM) {
Ctx = &ctx;
DL = TM.getDataLayout();
- InitMCObjectFileInfo(TM.getTargetTriple(),
- TM.getRelocationModel(), TM.getCodeModel(), *Ctx);
+ InitMCObjectFileInfo(TM.getTargetTriple(), TM.getRelocationModel(),
+ TM.getCodeModel(), *Ctx);
}
TargetLoweringObjectFile::~TargetLoweringObjectFile() {
diff --git a/lib/Target/TargetMachine.cpp b/lib/Target/TargetMachine.cpp
index 28242502ec85..0b05303f71bf 100644
--- a/lib/Target/TargetMachine.cpp
+++ b/lib/Target/TargetMachine.cpp
@@ -38,7 +38,7 @@ using namespace llvm;
//
TargetMachine::TargetMachine(const Target &T, StringRef DataLayoutString,
- StringRef TT, StringRef CPU, StringRef FS,
+ const Triple &TT, StringRef CPU, StringRef FS,
const TargetOptions &Options)
: TheTarget(T), DL(DataLayoutString), TargetTriple(TT), TargetCPU(CPU),
TargetFS(FS), CodeGenInfo(nullptr), AsmInfo(nullptr), MRI(nullptr),
@@ -70,7 +70,6 @@ void TargetMachine::resetTargetOptions(const Function &F) const {
RESET_OPTION(UnsafeFPMath, "unsafe-fp-math");
RESET_OPTION(NoInfsFPMath, "no-infs-fp-math");
RESET_OPTION(NoNaNsFPMath, "no-nans-fp-math");
- RESET_OPTION(DisableTailCalls, "disable-tail-calls");
}
/// getRelocationModel - Returns the code generation relocation model. The
diff --git a/lib/Target/TargetMachineC.cpp b/lib/Target/TargetMachineC.cpp
index 623b3e8ca320..719923558de4 100644
--- a/lib/Target/TargetMachineC.cpp
+++ b/lib/Target/TargetMachineC.cpp
@@ -156,7 +156,7 @@ LLVMTargetRef LLVMGetTargetMachineTarget(LLVMTargetMachineRef T) {
}
char* LLVMGetTargetMachineTriple(LLVMTargetMachineRef T) {
- std::string StringRep = unwrap(T)->getTargetTriple();
+ std::string StringRep = unwrap(T)->getTargetTriple().str();
return strdup(StringRep.c_str());
}
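Since getTargetTriple() now returns a parsed const Triple& rather than a string, C-API code that must hand back text materializes it explicitly with .str(). A sketch of that boundary (as in the LLVM C API, the caller owns and frees the returned buffer):

  #include "llvm/ADT/Triple.h"
  #include <cstdlib>
  #include <cstring>

  // Crossing from C++ (parsed Triple) back to C (malloc'd string).
  static char *tripleToCString(const llvm::Triple &TT) {
    return strdup(TT.str().c_str()); // Triple::str() is the stored text
  }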
diff --git a/lib/Target/TargetSubtargetInfo.cpp b/lib/Target/TargetSubtargetInfo.cpp
index b2bb59ea28c4..87df7af84525 100644
--- a/lib/Target/TargetSubtargetInfo.cpp
+++ b/lib/Target/TargetSubtargetInfo.cpp
@@ -40,7 +40,7 @@ bool TargetSubtargetInfo::enableRALocalReassignment(
return true;
}
-bool TargetSubtargetInfo::enablePostMachineScheduler() const {
+bool TargetSubtargetInfo::enablePostRAScheduler() const {
return getSchedModel().PostRAScheduler;
}
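This base-class rename is the root of the enablePostMachineScheduler changes in the PPC hunks earlier: the default implementation consults the per-CPU scheduling model bit, and a backend overrides the hook to force post-RA scheduling on. Because the hook is virtual, any out-of-tree override spelled with the old name would silently stop being called. A self-contained stub of the contract (names mirror the LLVM hook; the types are illustrative stand-ins):

  struct SchedModelStub {
    bool PostRAScheduler; // per-CPU bit from the scheduling model tables
  };

  struct SubtargetStub {
    SchedModelStub SchedModel;
    virtual ~SubtargetStub() {}
    // Renamed hook: defaults to the scheduling model's per-CPU setting.
    virtual bool enablePostRAScheduler() const {
      return SchedModel.PostRAScheduler;
    }
  };

  // A PPC-like backend forces it on regardless of CPU, as above.
  struct PPCSubtargetStub : SubtargetStub {
    bool enablePostRAScheduler() const override { return true; }
  };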
diff --git a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
index 9eee4a0f3d82..6ba897b8636d 100644
--- a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
@@ -1080,4 +1080,4 @@ CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
return new X86AsmInstrumentation(STI);
}
-} // End llvm namespace
+} // namespace llvm
diff --git a/lib/Target/X86/AsmParser/X86AsmInstrumentation.h b/lib/Target/X86/AsmParser/X86AsmInstrumentation.h
index 19ebcc44f61e..341fc81c0480 100644
--- a/lib/Target/X86/AsmParser/X86AsmInstrumentation.h
+++ b/lib/Target/X86/AsmParser/X86AsmInstrumentation.h
@@ -61,6 +61,6 @@ protected:
unsigned InitialFrameReg;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index e8965710f022..418f0431e1d8 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -42,15 +42,16 @@ namespace {
static const char OpPrecedence[] = {
0, // IC_OR
- 1, // IC_AND
- 2, // IC_LSHIFT
- 2, // IC_RSHIFT
- 3, // IC_PLUS
- 3, // IC_MINUS
- 4, // IC_MULTIPLY
- 4, // IC_DIVIDE
- 5, // IC_RPAREN
- 6, // IC_LPAREN
+ 1, // IC_XOR
+ 2, // IC_AND
+ 3, // IC_LSHIFT
+ 3, // IC_RSHIFT
+ 4, // IC_PLUS
+ 4, // IC_MINUS
+ 5, // IC_MULTIPLY
+ 5, // IC_DIVIDE
+ 6, // IC_RPAREN
+ 7, // IC_LPAREN
0, // IC_IMM
0 // IC_REGISTER
};
@@ -70,6 +71,7 @@ private:
enum InfixCalculatorTok {
IC_OR = 0,
+ IC_XOR,
IC_AND,
IC_LSHIFT,
IC_RSHIFT,
@@ -204,6 +206,12 @@ private:
Val = Op1.second | Op2.second;
OperandStack.push_back(std::make_pair(IC_IMM, Val));
break;
+ case IC_XOR:
+ assert(Op1.first == IC_IMM && Op2.first == IC_IMM &&
+ "Xor operation with an immediate and a register!");
+ Val = Op1.second ^ Op2.second;
+ OperandStack.push_back(std::make_pair(IC_IMM, Val));
+ break;
case IC_AND:
assert (Op1.first == IC_IMM && Op2.first == IC_IMM &&
"And operation with an immediate and a register!");
@@ -232,6 +240,7 @@ private:
enum IntelExprState {
IES_OR,
+ IES_XOR,
IES_AND,
IES_LSHIFT,
IES_RSHIFT,
@@ -297,6 +306,21 @@ private:
}
PrevState = CurrState;
}
+ void onXor() {
+ IntelExprState CurrState = State;
+ switch (State) {
+ default:
+ State = IES_ERROR;
+ break;
+ case IES_INTEGER:
+ case IES_RPAREN:
+ case IES_REGISTER:
+ State = IES_XOR;
+ IC.pushOperator(IC_XOR);
+ break;
+ }
+ PrevState = CurrState;
+ }
void onAnd() {
IntelExprState CurrState = State;
switch (State) {
@@ -473,6 +497,7 @@ private:
case IES_MINUS:
case IES_NOT:
case IES_OR:
+ case IES_XOR:
case IES_AND:
case IES_LSHIFT:
case IES_RSHIFT:
@@ -496,7 +521,7 @@ private:
PrevState == IES_LSHIFT || PrevState == IES_RSHIFT ||
PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE ||
PrevState == IES_LPAREN || PrevState == IES_LBRAC ||
- PrevState == IES_NOT) &&
+ PrevState == IES_NOT || PrevState == IES_XOR) &&
CurrState == IES_MINUS) {
// Unary minus. No need to pop the minus operand because it was never
// pushed.
@@ -506,7 +531,7 @@ private:
PrevState == IES_LSHIFT || PrevState == IES_RSHIFT ||
PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE ||
PrevState == IES_LPAREN || PrevState == IES_LBRAC ||
- PrevState == IES_NOT) &&
+ PrevState == IES_NOT || PrevState == IES_XOR) &&
CurrState == IES_NOT) {
// Unary not. No need to pop the not operand because it was never
// pushed.
@@ -593,6 +618,7 @@ private:
case IES_MINUS:
case IES_NOT:
case IES_OR:
+ case IES_XOR:
case IES_AND:
case IES_LSHIFT:
case IES_RSHIFT:
@@ -605,7 +631,7 @@ private:
PrevState == IES_LSHIFT || PrevState == IES_RSHIFT ||
PrevState == IES_MULTIPLY || PrevState == IES_DIVIDE ||
PrevState == IES_LPAREN || PrevState == IES_LBRAC ||
- PrevState == IES_NOT) &&
+ PrevState == IES_NOT || PrevState == IES_XOR) &&
(CurrState == IES_MINUS || CurrState == IES_NOT)) {
State = IES_ERROR;
break;
@@ -1217,6 +1243,7 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
case AsmToken::Star: SM.onStar(); break;
case AsmToken::Slash: SM.onDivide(); break;
case AsmToken::Pipe: SM.onOr(); break;
+ case AsmToken::Caret: SM.onXor(); break;
case AsmToken::Amp: SM.onAnd(); break;
case AsmToken::LessLess:
SM.onLShift(); break;
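These X86AsmParser hunks teach the Intel-syntax expression evaluator the '^' (XOR) operator: a new IC_XOR calculator token slots into the precedence table between OR and AND (shifting the existing levels up by one), a matching IES_XOR parser state mirrors IES_OR/IES_AND, including the lookback that recognizes a following unary minus or not, and AsmToken::Caret dispatches to onXor(). With this, an immediate expression such as 5 ^ 3 folds to 6 at parse time. A self-contained sketch of the precedence and evaluation being encoded (values illustrative, ordering as in the table above):

  #include <cstdint>

  // Higher binds tighter; XOR sits between OR and AND, matching C/C++
  // bitwise-operator precedence.
  enum InfixOp { OpOr, OpXor, OpAnd, OpShift, OpAdd, OpMul };
  static const int Precedence[] = {1, 2, 3, 4, 5, 6};

  static int64_t applyOp(InfixOp Op, int64_t L, int64_t R) {
    switch (Op) {
    case OpOr:    return L | R;
    case OpXor:   return L ^ R; // the newly supported operator
    case OpAnd:   return L & R;
    case OpShift: return L << R;
    case OpAdd:   return L + R;
    case OpMul:   return L * R;
    }
    return 0;
  }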
diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp
index 6e99c37c2bc7..5b53fbef3f71 100644
--- a/lib/Target/X86/Disassembler/X86Disassembler.cpp
+++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp
@@ -69,7 +69,7 @@ namespace X86 {
extern Target TheX86_32Target, TheX86_64Target;
-}
+} // namespace llvm
static bool translateInstruction(MCInst &target,
InternalInstruction &source,
diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
index 62b6b73e7864..ac484f317276 100644
--- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
+++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h
@@ -140,6 +140,6 @@ public:
private:
bool HasCustomInstComment;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
index 6e371da37290..2bee518fed68 100644
--- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
+++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h
@@ -159,6 +159,6 @@ public:
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 1ac656d4614b..2d85f84d6669 100644
--- a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -426,7 +426,7 @@ namespace CU {
UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
};
-} // end CU namespace
+} // namespace CU
class DarwinX86AsmBackend : public X86AsmBackend {
const MCRegisterInfo &MRI;
@@ -790,10 +790,8 @@ public:
MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT,
+ const Triple &TheTriple,
StringRef CPU) {
- Triple TheTriple(TT);
-
if (TheTriple.isOSBinFormatMachO())
return new DarwinX86_32AsmBackend(T, MRI, CPU);
@@ -806,10 +804,8 @@ MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
const MCRegisterInfo &MRI,
- StringRef TT,
+ const Triple &TheTriple,
StringRef CPU) {
- Triple TheTriple(TT);
-
if (TheTriple.isOSBinFormatMachO()) {
MachO::CPUSubTypeX86 CS =
StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName())
diff --git a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 85b00068252d..69e9c7b4a83e 100644
--- a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -41,7 +41,7 @@ namespace X86 {
/// AddrNumOperands - Total number of operands in a memory reference.
AddrNumOperands = 5
};
-} // end namespace X86;
+} // namespace X86
/// X86II - This namespace holds all of the target specific flags that
/// instruction info tracks.
@@ -271,7 +271,7 @@ namespace X86II {
/// register DI/EDI/ESI.
RawFrmDst = 9,
- /// RawFrmSrc - This form is for instructions that use the the source index
+ /// RawFrmSrc - This form is for instructions that use the source index
/// register SI/ESI/RSI with a possible segment override, and also the
/// destination index register DI/EDI/RDI.
RawFrmDstSrc = 10,
@@ -762,8 +762,8 @@ namespace X86II {
return (reg == X86::SPL || reg == X86::BPL ||
reg == X86::SIL || reg == X86::DIL);
}
-}
+} // namespace X86II
-} // end namespace llvm;
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
index a33468dc4769..512afebf482e 100644
--- a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -28,7 +28,7 @@ namespace {
unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
bool IsPCRel) const override;
};
-}
+} // namespace
X86ELFObjectWriter::X86ELFObjectWriter(bool IsELF64, uint8_t OSABI,
uint16_t EMachine)
diff --git a/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp b/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
index 2943dd383efa..7c09e5d59580 100644
--- a/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
@@ -32,7 +32,8 @@ public:
StringRef SymName; SymI->getName(SymName);
uint64_t SymAddr; SymI->getAddress(SymAddr);
uint64_t SymSize = SymI->getSize();
- int64_t Addend; getELFRelocationAddend(Rel, Addend);
+ auto *Obj = cast<ELFObjectFileBase>(Rel.getObjectFile());
+ int64_t Addend = *Obj->getRelocationAddend(Rel.getRawDataRefImpl());
MCSymbol *Sym = Ctx.getOrCreateSymbol(SymName);
// FIXME: check that the value is actually the same.
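The addend lookup moves from a free helper with an out-parameter to an ErrorOr<int64_t>-returning accessor on ELFObjectFileBase; the leading '*' unwraps the value and asserts if an error is present, which is tolerable here because x86-64 ELF uses RELA relocations, which always carry an addend. A self-contained sketch of the ErrorOr idiom:

  #include "llvm/Support/ErrorOr.h"
  #include <cstdint>
  #include <system_error>

  // Return a value or an error code, never an ambiguous sentinel.
  static llvm::ErrorOr<int64_t> getAddendSketch(bool IsRela) {
    if (!IsRela)
      return std::make_error_code(std::errc::invalid_argument);
    return int64_t(42);
  }

  static int64_t useAddend() {
    auto AddendOrErr = getAddendSketch(true);
    // operator bool means "no error"; '*' unwraps. Dereferencing without
    // the check, as the hunk above does, asserts on error.
    return AddendOrErr ? *AddendOrErr : 0;
  }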
diff --git a/lib/Target/X86/MCTargetDesc/X86FixupKinds.h b/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
index 4899900dcef9..a523a32b2a2d 100644
--- a/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
+++ b/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
@@ -28,7 +28,7 @@ enum Fixups {
LastTargetFixupKind,
NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
};
-}
-}
+} // namespace X86
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index cc98e55dc695..431010d4cbc2 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -42,12 +42,11 @@ using namespace llvm;
#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"
-std::string X86_MC::ParseX86Triple(StringRef TT) {
- Triple TheTriple(TT);
+std::string X86_MC::ParseX86Triple(const Triple &TT) {
std::string FS;
- if (TheTriple.getArch() == Triple::x86_64)
+ if (TT.getArch() == Triple::x86_64)
FS = "+64bit-mode,-32bit-mode,-16bit-mode";
- else if (TheTriple.getEnvironment() != Triple::CODE16)
+ else if (TT.getEnvironment() != Triple::CODE16)
FS = "-64bit-mode,+32bit-mode,-16bit-mode";
else
FS = "-64bit-mode,-32bit-mode,+16bit-mode";
@@ -55,7 +54,7 @@ std::string X86_MC::ParseX86Triple(StringRef TT) {
return FS;
}
-unsigned X86_MC::getDwarfRegFlavour(Triple TT, bool isEH) {
+unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
if (TT.getArch() == Triple::x86_64)
return DWARFFlavour::X86_64;
@@ -75,8 +74,8 @@ void X86_MC::InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI) {
}
}
-MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
+MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
+ StringRef CPU, StringRef FS) {
std::string ArchFS = X86_MC::ParseX86Triple(TT);
if (!FS.empty()) {
if (!ArchFS.empty())
@@ -219,15 +218,14 @@ static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
return nullptr;
}
-static MCRelocationInfo *createX86MCRelocationInfo(StringRef TT,
+static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
MCContext &Ctx) {
- Triple TheTriple(TT);
if (TheTriple.isOSBinFormatMachO() && TheTriple.getArch() == Triple::x86_64)
return createX86_64MachORelocationInfo(Ctx);
else if (TheTriple.isOSBinFormatELF())
return createX86_64ELFRelocationInfo(Ctx);
// Default to the stock relocation info.
- return llvm::createMCRelocationInfo(TT, Ctx);
+ return llvm::createMCRelocationInfo(TheTriple, Ctx);
}
static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
index dcdae1dbc469..020803b57f76 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
@@ -52,26 +52,26 @@ namespace N86 {
}
namespace X86_MC {
- std::string ParseX86Triple(StringRef TT);
+std::string ParseX86Triple(const Triple &TT);
- unsigned getDwarfRegFlavour(Triple TT, bool isEH);
+unsigned getDwarfRegFlavour(const Triple &TT, bool isEH);
- void InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI);
+void InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI);
- /// Create a X86 MCSubtargetInfo instance. This is exposed so Asm parser, etc.
- /// do not need to go through TargetRegistry.
- MCSubtargetInfo *createX86MCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS);
-}
+/// Create a X86 MCSubtargetInfo instance. This is exposed so Asm parser, etc.
+/// do not need to go through TargetRegistry.
+MCSubtargetInfo *createX86MCSubtargetInfo(const Triple &TT, StringRef CPU,
+ StringRef FS);
+} // namespace X86_MC
MCCodeEmitter *createX86MCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
MCContext &Ctx);
MCAsmBackend *createX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+ const Triple &TT, StringRef CPU);
MCAsmBackend *createX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
- StringRef TT, StringRef CPU);
+ const Triple &TT, StringRef CPU);
/// Construct an X86 Windows COFF machine code streamer which will generate
/// PE/COFF format object files.
@@ -98,7 +98,7 @@ MCRelocationInfo *createX86_64MachORelocationInfo(MCContext &Ctx);
/// Construct X86-64 ELF relocation info.
MCRelocationInfo *createX86_64ELFRelocationInfo(MCContext &Ctx);
-} // End llvm namespace
+} // namespace llvm
// Defines symbolic names for X86 registers. This defines a mapping from
diff --git a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
index 95acc07192da..773fbf41a7b1 100644
--- a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
@@ -69,7 +69,7 @@ public:
FixedValue);
}
};
-}
+} // namespace
static bool isFixupKindRIPRel(unsigned Kind) {
return Kind == X86::reloc_riprel_4byte ||
@@ -205,7 +205,7 @@ void X86MachObjectWriter::RecordX86_64Relocation(
if (Symbol->isTemporary() && Value) {
const MCSection &Sec = Symbol->getSection();
if (!Asm.getContext().getAsmInfo()->isSectionAtomizableBySymbols(Sec))
- Asm.addLocalUsedInReloc(*Symbol);
+ Symbol->setUsedInReloc();
}
RelSymbol = Asm.getAtom(*Symbol);
diff --git a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
index bd1bc9943b6d..7d262cdbf51d 100644
--- a/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
@@ -31,7 +31,7 @@ namespace {
bool IsCrossSection,
const MCAsmBackend &MAB) const override;
};
-}
+} // namespace
X86WinCOFFObjectWriter::X86WinCOFFObjectWriter(bool Is64Bit)
: MCWinCOFFObjectTargetWriter(Is64Bit ? COFF::IMAGE_FILE_MACHINE_AMD64
diff --git a/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp b/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp
index 92f42b68ae51..dc6dd66bcd85 100644
--- a/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86WinCOFFStreamer.cpp
@@ -46,7 +46,7 @@ void X86WinCOFFStreamer::FinishImpl() {
MCWinCOFFStreamer::FinishImpl();
}
-}
+} // namespace
MCStreamer *llvm::createX86WinCOFFStreamer(MCContext &C, MCAsmBackend &AB,
raw_pwrite_stream &OS,
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.cpp b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
index ef3318ba7580..1e7d94287c4a 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.cpp
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -431,4 +431,4 @@ void DecodeScalarMoveMask(MVT VT, bool IsLoad, SmallVectorImpl<int> &Mask) {
for (unsigned i = 1; i < NumElts; i++)
Mask.push_back(IsLoad ? static_cast<int>(SM_SentinelZero) : i);
}
-} // llvm namespace
+} // namespace llvm
diff --git a/lib/Target/X86/Utils/X86ShuffleDecode.h b/lib/Target/X86/Utils/X86ShuffleDecode.h
index 14b69434806e..0139297fc72d 100644
--- a/lib/Target/X86/Utils/X86ShuffleDecode.h
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.h
@@ -100,6 +100,6 @@ void DecodeZeroMoveLowMask(MVT VT, SmallVectorImpl<int> &ShuffleMask);
/// \brief Decode a scalar float move instruction as a shuffle mask.
void DecodeScalarMoveMask(MVT VT, bool IsLoad,
SmallVectorImpl<int> &ShuffleMask);
-} // llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86.h b/lib/Target/X86/X86.h
index 8403ae6101df..80f457984951 100644
--- a/lib/Target/X86/X86.h
+++ b/lib/Target/X86/X86.h
@@ -80,6 +80,6 @@ FunctionPass *createX86WinEHStatePass();
/// must run after prologue/epilogue insertion and before lowering
/// the MachineInstr to MC.
FunctionPass *createX86ExpandPseudoPass();
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp
index 64fc6d0d7e5c..205140144ab5 100644
--- a/lib/Target/X86/X86AsmPrinter.cpp
+++ b/lib/Target/X86/X86AsmPrinter.cpp
@@ -511,7 +511,7 @@ bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
}
void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
- Triple TT(TM.getTargetTriple());
+ const Triple &TT = TM.getTargetTriple();
if (TT.isOSBinFormatMachO())
OutStreamer->SwitchSection(getObjFileLowering().getTextSection());
@@ -585,7 +585,7 @@ void X86AsmPrinter::GenerateExportDirective(const MCSymbol *Sym, bool IsData) {
SmallString<128> Directive;
raw_svector_ostream OS(Directive);
StringRef Name = Sym->getName();
- Triple TT(TM.getTargetTriple());
+ const Triple &TT = TM.getTargetTriple();
if (TT.isKnownWindowsMSVCEnvironment())
OS << " /EXPORT:";
@@ -610,7 +610,7 @@ void X86AsmPrinter::GenerateExportDirective(const MCSymbol *Sym, bool IsData) {
}
void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
- Triple TT(TM.getTargetTriple());
+ const Triple &TT = TM.getTargetTriple();
if (TT.isOSBinFormatMachO()) {
// All darwin targets use mach-o.
@@ -674,6 +674,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
}
SM.serializeToStackMapSection();
+ FM.serializeToFaultMapSection();
// Funny Darwin hack: This flag tells the linker that no global symbols
// contain code that falls through to other global symbols (e.g. the obvious
@@ -726,8 +727,10 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
}
}
- if (TT.isOSBinFormatELF())
+ if (TT.isOSBinFormatELF()) {
SM.serializeToStackMapSection();
+ FM.serializeToFaultMapSection();
+ }
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h
index 3beeb1752bf5..acba21169c9c 100644
--- a/lib/Target/X86/X86AsmPrinter.h
+++ b/lib/Target/X86/X86AsmPrinter.h
@@ -12,6 +12,7 @@
#include "X86Subtarget.h"
#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/FaultMaps.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/Target/TargetMachine.h"
@@ -27,6 +28,7 @@ class MCSymbol;
class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget;
StackMaps SM;
+ FaultMaps FM;
void GenerateExportDirective(const MCSymbol *Sym, bool IsData);
@@ -83,13 +85,15 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
void LowerSTACKMAP(const MachineInstr &MI);
void LowerPATCHPOINT(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerSTATEPOINT(const MachineInstr &MI, X86MCInstLower &MCIL);
+ void LowerFAULTING_LOAD_OP(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerTlsAddr(X86MCInstLower &MCInstLowering, const MachineInstr &MI);
public:
explicit X86AsmPrinter(TargetMachine &TM,
std::unique_ptr<MCStreamer> Streamer)
- : AsmPrinter(TM, std::move(Streamer)), SM(*this), SMShadowTracker(TM) {}
+ : AsmPrinter(TM, std::move(Streamer)), SM(*this), FM(*this),
+ SMShadowTracker(TM) {}
const char *getPassName() const override {
return "X86 Assembly / Object Emitter";
diff --git a/lib/Target/X86/X86CallFrameOptimization.cpp b/lib/Target/X86/X86CallFrameOptimization.cpp
index 44121256ef00..6d6831b18b0a 100644
--- a/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -99,7 +99,7 @@ private:
};
char X86CallFrameOptimization::ID = 0;
-}
+} // namespace
FunctionPass *llvm::createX86CallFrameOptimization() {
return new X86CallFrameOptimization();
diff --git a/lib/Target/X86/X86CallingConv.h b/lib/Target/X86/X86CallingConv.h
index 0eb2494f1d63..a377eb6051ae 100644
--- a/lib/Target/X86/X86CallingConv.h
+++ b/lib/Target/X86/X86CallingConv.h
@@ -42,7 +42,7 @@ inline bool CC_X86_AnyReg_Error(unsigned &, MVT &, MVT &,
return false;
}
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86ExpandPseudo.cpp b/lib/Target/X86/X86ExpandPseudo.cpp
index 1b00997e7504..6a5a28e546f2 100644
--- a/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/lib/Target/X86/X86ExpandPseudo.cpp
@@ -84,19 +84,9 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
int StackAdj = StackAdjust.getImm();
if (StackAdj) {
- bool Is64Bit = STI->is64Bit();
- // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
- const bool Uses64BitFramePtr =
- STI->isTarget64BitLP64() || STI->isTargetNaCl64();
- // Check if we should use LEA for SP.
- bool UseLEAForSP = STI->useLeaForSP() &&
- X86FL->canUseLEAForSPInEpilogue(*MBB.getParent());
- unsigned StackPtr = TRI->getStackRegister();
// Check for possible merge with preceding ADD instruction.
- StackAdj += X86FrameLowering::mergeSPUpdates(MBB, MBBI, StackPtr, true);
- X86FrameLowering::emitSPUpdate(MBB, MBBI, StackPtr, StackAdj, Is64Bit,
- Uses64BitFramePtr, UseLEAForSP, *TII,
- *TRI);
+ StackAdj += X86FL->mergeSPUpdates(MBB, MBBI, true);
+ X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
}
// Jump to label or value in register.
diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp
index b39c5aba30bf..8305a0454c80 100644
--- a/lib/Target/X86/X86FixupLEAs.cpp
+++ b/lib/Target/X86/X86FixupLEAs.cpp
@@ -44,7 +44,7 @@ class FixupLEAPass : public MachineFunctionPass {
/// \brief Given a machine register, look for the instruction
/// which writes it in the current basic block. If found,
/// try to replace it with an equivalent LEA instruction.
- /// If replacement succeeds, then also process the the newly created
+ /// If replacement succeeds, then also process the newly created
/// instruction.
void seekLEAFixup(MachineOperand &p, MachineBasicBlock::iterator &I,
MachineFunction::iterator MFI);
@@ -91,7 +91,7 @@ private:
const X86InstrInfo *TII; // Machine instruction info.
};
char FixupLEAPass::ID = 0;
-}
+} // namespace
MachineInstr *
FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp
index 3b0bd03095a9..6f1d8e523732 100644
--- a/lib/Target/X86/X86FloatingPoint.cpp
+++ b/lib/Target/X86/X86FloatingPoint.cpp
@@ -279,7 +279,7 @@ namespace {
void setKillFlags(MachineBasicBlock &MBB) const;
};
char FPS::ID = 0;
-}
+} // namespace
FunctionPass *llvm::createX86FloatingPointStackifierPass() { return new FPS(); }
@@ -544,7 +544,7 @@ namespace {
return V < TE.from;
}
};
-}
+} // namespace
#ifndef NDEBUG
static bool TableIsSorted(const TableEntry *Table, unsigned NumEntries) {
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index db58d9c5f301..85c5b6499131 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -37,6 +37,20 @@ using namespace llvm;
// FIXME: completely move here.
extern cl::opt<bool> ForceStackAlign;
+X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
+ unsigned StackAlignOverride)
+ : TargetFrameLowering(StackGrowsDown, StackAlignOverride,
+ STI.is64Bit() ? -8 : -4),
+ STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
+ // Cache a bunch of frame-related predicates for this subtarget.
+ SlotSize = TRI->getSlotSize();
+ Is64Bit = STI.is64Bit();
+ IsLP64 = STI.isTarget64BitLP64();
+  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
+ Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
+ StackPtr = TRI->getStackRegister();
+}
+
bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
return !MF.getFrameInfo()->hasVarSizedObjects() &&
!MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
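
The new constructor caches per-subtarget facts once, since one X86FrameLowering instance exists per subtarget; the hunks that follow then read TRI, TII, SlotSize, Is64Bit, and friends directly instead of re-querying MF.getSubtarget() in every method. A compilable sketch of the caching pattern, using hypothetical stand-in types:

    #include <cstdio>

    // Hypothetical subtarget exposing the queries the patch caches.
    struct SubtargetSketch {
      bool is64Bit() const { return true; }
      bool isTarget64BitLP64() const { return true; }
      bool isTargetNaCl64() const { return false; }
    };

    // Mirrors the new X86FrameLowering constructor: derive everything from the
    // subtarget once instead of recomputing it in every emit* method.
    struct FrameLoweringSketch {
      const SubtargetSketch &STI;
      unsigned SlotSize;
      bool Is64Bit, Uses64BitFramePtr;
      explicit FrameLoweringSketch(const SubtargetSketch &S)
          : STI(S), SlotSize(S.is64Bit() ? 8 : 4), Is64Bit(S.is64Bit()),
            // x32 (ILP32 on x86_64) keeps a 32-bit stack pointer.
            Uses64BitFramePtr(S.isTarget64BitLP64() || S.isTargetNaCl64()) {}
    };

    int main() {
      SubtargetSketch S;
      FrameLoweringSketch FL(S);
      std::printf("slot=%u 64bit-sp=%d\n", FL.SlotSize, FL.Uses64BitFramePtr);
    }
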
@@ -48,11 +62,9 @@ bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
/// Use a more nuanced condition.
bool
X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
- const X86RegisterInfo *TRI = static_cast<const X86RegisterInfo *>
- (MF.getSubtarget().getRegisterInfo());
return hasReservedCallFrame(MF) ||
- (hasFP(MF) && !TRI->needsStackRealignment(MF))
- || TRI->hasBasePointer(MF);
+ (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
+ TRI->hasBasePointer(MF);
}
// needsFrameIndexResolution - Do we need to perform FI resolution for
@@ -74,10 +86,9 @@ X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
const MachineModuleInfo &MMI = MF.getMMI();
- const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
- RegInfo->needsStackRealignment(MF) ||
+ TRI->needsStackRealignment(MF) ||
MFI->hasVarSizedObjects() ||
MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||
MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
@@ -137,7 +148,7 @@ static unsigned getLEArOpcode(unsigned IsLP64) {
/// to this register without worry about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
- const TargetRegisterInfo &TRI,
+ const TargetRegisterInfo *TRI,
bool Is64Bit) {
const MachineFunction *MF = MBB.getParent();
const Function *F = MF->getFunction();
@@ -176,7 +187,7 @@ static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
unsigned Reg = MO.getReg();
if (!Reg)
continue;
- for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
+ for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
Uses.insert(*AI);
}
@@ -203,23 +214,36 @@ static bool isEAXLiveIn(MachineFunction &MF) {
return false;
}
+/// Check whether or not the terminators of \p MBB need to read EFLAGS.
+static bool terminatorsNeedFlagsAsInput(const MachineBasicBlock &MBB) {
+ for (const MachineInstr &MI : MBB.terminators()) {
+ bool BreakNext = false;
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (Reg != X86::EFLAGS)
+ continue;
+
+      // This terminator needs an EFLAGS value that is not defined
+ // by a previous terminator.
+ if (!MO.isDef())
+ return true;
+ BreakNext = true;
+ }
+ if (BreakNext)
+ break;
+ }
+ return false;
+}
+
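
terminatorsNeedFlagsAsInput scans the block's terminators in order and reports a live EFLAGS use only if no earlier terminator defined the flags first. A self-contained model of that scan, where OperandSketch and InstrSketch are invented stand-ins for MachineOperand/MachineInstr:

    #include <cassert>
    #include <vector>

    struct OperandSketch { bool IsFlags; bool IsDef; };
    using InstrSketch = std::vector<OperandSketch>;

    // Returns true if some terminator consumes EFLAGS that no earlier
    // terminator produced; inserting a flag-clobbering ADD/SUB before the
    // terminators would then be unsafe.
    static bool terminatorsReadLiveFlags(const std::vector<InstrSketch> &Terms) {
      for (const InstrSketch &MI : Terms) {
        bool BreakNext = false;
        for (const OperandSketch &MO : MI) {
          if (!MO.IsFlags)
            continue;
          if (!MO.IsDef)
            return true;   // a use not satisfied by a previous terminator
          BreakNext = true; // this terminator defines flags for later ones
        }
        if (BreakNext)
          break;
      }
      return false;
    }

    int main() {
      // A conditional branch terminator that reads EFLAGS without defining them.
      std::vector<InstrSketch> Terms = {{{/*IsFlags=*/true, /*IsDef=*/false}}};
      assert(terminatorsReadLiveFlags(Terms));
      return 0;
    }
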
/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
- unsigned StackPtr, int64_t NumBytes,
- bool Is64BitTarget, bool Is64BitStackPtr,
- bool UseLEA, const TargetInstrInfo &TII,
- const TargetRegisterInfo &TRI) {
+ int64_t NumBytes, bool InEpilogue) const {
bool isSub = NumBytes < 0;
uint64_t Offset = isSub ? -NumBytes : NumBytes;
- unsigned Opc;
- if (UseLEA)
- Opc = getLEArOpcode(Is64BitStackPtr);
- else
- Opc = isSub
- ? getSUBriOpcode(Is64BitStackPtr, Offset)
- : getADDriOpcode(Is64BitStackPtr, Offset);
uint64_t Chunk = (1LL << 31) - 1;
DebugLoc DL = MBB.findDebugLoc(MBBI);
@@ -231,17 +255,17 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
unsigned Reg = 0;
if (isSub && !isEAXLiveIn(*MBB.getParent()))
- Reg = (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX);
+ Reg = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
else
- Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);
+ Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
if (Reg) {
- Opc = Is64BitTarget ? X86::MOV64ri : X86::MOV32ri;
+ unsigned Opc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
.addImm(Offset);
Opc = isSub
- ? getSUBrrOpcode(Is64BitTarget)
- : getADDrrOpcode(Is64BitTarget);
+ ? getSUBrrOpcode(Is64Bit)
+ : getADDrrOpcode(Is64Bit);
MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
.addReg(StackPtr)
.addReg(Reg);
@@ -252,15 +276,15 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
}
uint64_t ThisVal = std::min(Offset, Chunk);
- if (ThisVal == (Is64BitTarget ? 8 : 4)) {
+ if (ThisVal == (Is64Bit ? 8 : 4)) {
// Use push / pop instead.
unsigned Reg = isSub
- ? (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX)
- : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);
+ ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
+ : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
if (Reg) {
- Opc = isSub
- ? (Is64BitTarget ? X86::PUSH64r : X86::PUSH32r)
- : (Is64BitTarget ? X86::POP64r : X86::POP32r);
+ unsigned Opc = isSub
+ ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
+ : (Is64Bit ? X86::POP64r : X86::POP32r);
MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
.addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
if (isSub)
@@ -270,25 +294,59 @@ void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
}
}
- MachineInstr *MI = nullptr;
-
- if (UseLEA) {
- MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
- StackPtr, false, isSub ? -ThisVal : ThisVal);
- } else {
- MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
- .addReg(StackPtr)
- .addImm(ThisVal);
- MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
- }
-
+ MachineInstrBuilder MI = BuildStackAdjustment(
+ MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue);
if (isSub)
- MI->setFlag(MachineInstr::FrameSetup);
+ MI.setMIFlag(MachineInstr::FrameSetup);
Offset -= ThisVal;
}
}
+MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ int64_t Offset, bool InEpilogue) const {
+ assert(Offset != 0 && "zero offset stack adjustment requested");
+
+ // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
+ // is tricky.
+ bool UseLEA;
+ if (!InEpilogue) {
+ UseLEA = STI.useLeaForSP();
+ } else {
+    // If we can use LEA for SP but we shouldn't, check whether any of
+    // the terminators reads EFLAGS. Otherwise we would insert an ADD
+    // that redefines EFLAGS and breaks the condition.
+    // Alternatively, we could move the ADD, but this may not be possible
+    // and is an optimization anyway.
+ UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
+ if (UseLEA && !STI.useLeaForSP())
+ UseLEA = terminatorsNeedFlagsAsInput(MBB);
+    // If the assert below fires, it means we did not do the right thing
+    // in canUseAsEpilogue.
+ assert((UseLEA || !terminatorsNeedFlagsAsInput(MBB)) &&
+ "We shouldn't have allowed this insertion point");
+ }
+
+ MachineInstrBuilder MI;
+ if (UseLEA) {
+ MI = addRegOffset(BuildMI(MBB, MBBI, DL,
+ TII.get(getLEArOpcode(Uses64BitFramePtr)),
+ StackPtr),
+ StackPtr, false, Offset);
+ } else {
+ bool IsSub = Offset < 0;
+ uint64_t AbsOffset = IsSub ? -Offset : Offset;
+ unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
+ : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
+ MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr)
+ .addImm(AbsOffset);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ }
+ return MI;
+}
+
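
BuildStackAdjustment folds the LEA-versus-ADD/SUB decision into one place: LEA leaves EFLAGS untouched, while ADD/SUB clobber them, so an epilogue whose terminators read the flags is steered onto LEA. A simplified, compilable sketch of that choice; pickStackAdjustOp and its parameters are illustrative, and the sketch assumes LEA is legal at the insertion point:

    #include <cstdint>
    #include <cstdio>

    // LEA leaves EFLAGS intact; ADD/SUB clobber them.
    enum class SPAdjustOp { Lea, Add, Sub };

    static SPAdjustOp pickStackAdjustOp(bool InEpilogue, bool SubtargetLikesLea,
                                        bool TerminatorsReadFlags,
                                        int64_t Offset) {
      // In an epilogue, live flags in the terminators force LEA even when the
      // subtarget would not otherwise prefer it.
      bool UseLea = InEpilogue ? (SubtargetLikesLea || TerminatorsReadFlags)
                               : SubtargetLikesLea;
      if (UseLea)
        return SPAdjustOp::Lea;
      return Offset < 0 ? SPAdjustOp::Sub : SPAdjustOp::Add;
    }

    int main() {
      // An epilogue whose terminators read EFLAGS must not clobber them.
      SPAdjustOp Op = pickStackAdjustOp(true, false, true, 16);
      std::printf("%d\n", static_cast<int>(Op)); // prints 0 (Lea)
    }
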
/// mergeSPUpdatesUp - Merge two stack-manipulating instructions using the upper iterator.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
@@ -315,8 +373,7 @@ void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI,
- unsigned StackPtr,
- bool doMergeWithPrevious) {
+ bool doMergeWithPrevious) const {
if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
(!doMergeWithPrevious && MBBI == MBB.end()))
return 0;
@@ -345,6 +402,15 @@ int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
return Offset;
}
+void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ MCCFIInstruction CFIInst) const {
+ MachineFunction &MF = *MBB.getParent();
+ unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+}
+
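
BuildCFI wraps the recurring two-step dance of registering an MCCFIInstruction with MachineModuleInfo and then emitting a CFI_INSTRUCTION pseudo that references the returned index; the prologue hunks below shrink accordingly. A standalone model of that pairing, where FrameInstTable stands in for MMI and strings stand in for MCCFIInstruction:

    #include <cstdio>
    #include <string>
    #include <vector>

    // addFrameInst stores the CFI directive and returns its index, which the
    // CFI_INSTRUCTION pseudo then references.
    struct FrameInstTable {
      std::vector<std::string> Insts;
      unsigned addFrameInst(std::string CFI) {
        Insts.push_back(std::move(CFI));
        return static_cast<unsigned>(Insts.size() - 1);
      }
    };

    static void buildCFI(FrameInstTable &MMI, std::string CFIInst) {
      unsigned CFIIndex = MMI.addFrameInst(std::move(CFIInst));
      std::printf("CFI_INSTRUCTION index=%u\n", CFIIndex); // emit the pseudo
    }

    int main() {
      FrameInstTable MMI;
      buildCFI(MMI, "def_cfa_offset 16");
      buildCFI(MMI, "offset %rbp, -16");
    }
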
void
X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -353,7 +419,6 @@ X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo &MMI = MF.getMMI();
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
// Add callee saved registers to move list.
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
@@ -366,11 +431,8 @@ X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
unsigned Reg = I->getReg();
unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
- unsigned CFIIndex =
- MMI.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg,
- Offset));
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ BuildCFI(MBB, MBBI, DL,
+ MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
}
}
@@ -394,10 +456,7 @@ static bool usesTheStack(const MachineFunction &MF) {
void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
- DebugLoc DL) {
- const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
- const TargetInstrInfo &TII = *STI.getInstrInfo();
- bool Is64Bit = STI.is64Bit();
+ DebugLoc DL) const {
bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
unsigned CallOp;
@@ -463,13 +522,10 @@ static unsigned calculateSetFPREG(uint64_t SPAdjust) {
// info, we need to know the ABI stack alignment as well in case we
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
-static uint64_t calculateMaxStackAlign(const MachineFunction &MF) {
+uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
- const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
- const X86RegisterInfo *RegInfo = STI.getRegisterInfo();
- unsigned SlotSize = RegInfo->getSlotSize();
- unsigned StackAlign = STI.getFrameLowering()->getStackAlignment();
+ unsigned StackAlign = getStackAlignment();
if (ForceStackAlign) {
if (MFI->hasCalls())
MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
@@ -479,6 +535,22 @@ static uint64_t calculateMaxStackAlign(const MachineFunction &MF) {
return MaxAlign;
}
+void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL,
+ uint64_t MaxAlign) const {
+ uint64_t Val = -MaxAlign;
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL, TII.get(getANDriOpcode(Uses64BitFramePtr, Val)),
+ StackPtr)
+ .addReg(StackPtr)
+ .addImm(Val)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // The EFLAGS implicit def is dead.
+ MI->getOperand(3).setIsDead();
+}
+
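
The AND with -MaxAlign relies on two's-complement arithmetic: for a power-of-two alignment, -MaxAlign is an all-ones mask with the low log2(MaxAlign) bits cleared, so the AND rounds the stack pointer down to an aligned address (downward is correct because x86 stacks grow down). A small check of that arithmetic, with an arbitrary example pointer:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t MaxAlign = 32;            // must be a power of two
      uint64_t SP = 0x7fffffffe16c;      // arbitrary unaligned stack pointer
      uint64_t Aligned = SP & ~(MaxAlign - 1); // identical to SP & -MaxAlign
      assert(Aligned == (SP & (0ULL - MaxAlign)));
      assert(Aligned % MaxAlign == 0 && Aligned <= SP);
      return 0;
    }
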
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjust the stack pointer. Adjust the stack pointer to allocate
/// space for local variables. Also emit labels used by the exception handler to
@@ -565,40 +637,32 @@ static uint64_t calculateMaxStackAlign(const MachineFunction &MF) {
void X86FrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
+ assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
+ "MF used frame lowering for wrong subtarget");
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *Fn = MF.getFunction();
- const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
- const X86RegisterInfo *RegInfo = STI.getRegisterInfo();
- const TargetInstrInfo &TII = *STI.getInstrInfo();
MachineModuleInfo &MMI = MF.getMMI();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
bool HasFP = hasFP(MF);
- bool Is64Bit = STI.is64Bit();
- // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
- const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
- bool IsWin64 = STI.isCallingConvWin64(Fn->getCallingConv());
- // Not necessarily synonymous with IsWin64.
- bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
- bool NeedsWinEH = IsWinEH && Fn->needsUnwindTableEntry();
+ bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv());
+ bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+ bool NeedsWinCFI = IsWin64Prologue && Fn->needsUnwindTableEntry();
bool NeedsDwarfCFI =
- !IsWinEH && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
- bool UseLEA = STI.useLeaForSP();
- unsigned SlotSize = RegInfo->getSlotSize();
- unsigned FramePtr = RegInfo->getFrameRegister(MF);
+ !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
+ unsigned FramePtr = TRI->getFrameRegister(MF);
const unsigned MachineFramePtr =
STI.isTarget64BitILP32()
? getX86SubSuperRegister(FramePtr, MVT::i64, false)
: FramePtr;
- unsigned StackPtr = RegInfo->getStackRegister();
- unsigned BasePtr = RegInfo->getBaseRegister();
+ unsigned BasePtr = TRI->getBaseRegister();
DebugLoc DL;
// Add RETADDR move area to callee saved frame size.
int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
- if (TailCallReturnAddrDelta && IsWinEH)
+ if (TailCallReturnAddrDelta && IsWin64Prologue)
report_fatal_error("Can't handle guaranteed tail call under win64 yet");
if (TailCallReturnAddrDelta < 0)
@@ -621,10 +685,10 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// stack pointer (we fit in the Red Zone). We also check that we don't
// push and pop from the stack.
if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
- !RegInfo->needsStackRealignment(MF) &&
+ !TRI->needsStackRealignment(MF) &&
!MFI->hasVarSizedObjects() && // No dynamic alloca.
!MFI->adjustsStack() && // No calls.
- !IsWin64 && // Win64 has no Red Zone
+ !IsWin64CC && // Win64 has no Red Zone
!usesTheStack(MF) && // Don't push and pop.
!MF.shouldSplitStack()) { // Regular stack
uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
@@ -637,14 +701,9 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// applies to tail call optimized functions where the callee argument stack
// size is bigger than the callers.
if (TailCallReturnAddrDelta < 0) {
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL,
- TII.get(getSUBriOpcode(Uses64BitFramePtr, -TailCallReturnAddrDelta)),
- StackPtr)
- .addReg(StackPtr)
- .addImm(-TailCallReturnAddrDelta)
+ BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
+ /*InEpilogue=*/false)
.setMIFlag(MachineInstr::FrameSetup);
- MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
}
// Mapping for machine moves:
@@ -674,7 +733,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
// Callee-saved registers are pushed on stack before the stack is realigned.
- if (RegInfo->needsStackRealignment(MF) && !IsWinEH)
+ if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
// Get the offset of the stack slot for the EBP register, which is
@@ -691,27 +750,22 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Mark the place where EBP/RBP was saved.
// Define the current CFA rule to use the provided offset.
assert(StackSize);
- unsigned CFIIndex = MMI.addFrameInst(
- MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ BuildCFI(MBB, MBBI, DL,
+ MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
// Change the rule for the FramePtr to be an "offset" rule.
- unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);
- CFIIndex = MMI.addFrameInst(
- MCCFIInstruction::createOffset(nullptr,
- DwarfFramePtr, 2 * stackGrowth));
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
+ BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
+ nullptr, DwarfFramePtr, 2 * stackGrowth));
}
- if (NeedsWinEH) {
+ if (NeedsWinCFI) {
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
.addImm(FramePtr)
.setMIFlag(MachineInstr::FrameSetup);
}
- if (!IsWinEH) {
+ if (!IsWin64Prologue) {
// Update EBP with the new base value.
BuildMI(MBB, MBBI, DL,
TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
@@ -723,11 +777,9 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
if (NeedsDwarfCFI) {
// Mark effective beginning of when frame pointer becomes valid.
// Define the current CFA to use the EBP/RBP register.
- unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);
- unsigned CFIIndex = MMI.addFrameInst(
- MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
+ BuildCFI(MBB, MBBI, DL,
+ MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
}
// Mark the FramePtr as live-in in every block.
@@ -752,14 +804,12 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Mark callee-saved push instruction.
// Define the current CFA rule to use the provided offset.
assert(StackSize);
- unsigned CFIIndex = MMI.addFrameInst(
- MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ BuildCFI(MBB, MBBI, DL,
+ MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
StackOffset += stackGrowth;
}
- if (NeedsWinEH) {
+ if (NeedsWinCFI) {
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
MachineInstr::FrameSetup);
}
@@ -768,24 +818,15 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// Realign stack after we pushed callee-saved registers (so that we'll be
// able to calculate their offsets from the frame pointer).
// Don't do this for Win64, it needs to realign the stack after the prologue.
- if (!IsWinEH && RegInfo->needsStackRealignment(MF)) {
+ if (!IsWin64Prologue && TRI->needsStackRealignment(MF)) {
assert(HasFP && "There should be a frame pointer if stack is realigned.");
- uint64_t Val = -MaxAlign;
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL, TII.get(getANDriOpcode(Uses64BitFramePtr, Val)),
- StackPtr)
- .addReg(StackPtr)
- .addImm(Val)
- .setMIFlag(MachineInstr::FrameSetup);
-
- // The EFLAGS implicit def is dead.
- MI->getOperand(3).setIsDead();
+ BuildStackAlignAND(MBB, MBBI, DL, MaxAlign);
}
// If there is an SUB32ri of ESP immediately before this instruction, merge
// the two. This can be the case when tail call elimination is enabled and
// the callee has more arguments than the caller.
- NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
+ NumBytes -= mergeSPUpdates(MBB, MBBI, true);
// Adjust stack pointer: ESP -= numbytes.
@@ -798,7 +839,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
// increments is necessary to ensure that the guard pages used by the OS
// virtual memory manager are allocated in correct sequence.
uint64_t AlignedNumBytes = NumBytes;
- if (IsWinEH && RegInfo->needsStackRealignment(MF))
+ if (IsWin64Prologue && TRI->needsStackRealignment(MF))
AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
// Check whether EAX is livein for this function.
@@ -859,17 +900,16 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
MBB.insert(MBBI, MI);
}
} else if (NumBytes) {
- emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, Uses64BitFramePtr,
- UseLEA, TII, *RegInfo);
+ emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, /*InEpilogue=*/false);
}
- if (NeedsWinEH && NumBytes)
+ if (NeedsWinCFI && NumBytes)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
.addImm(NumBytes)
.setMIFlag(MachineInstr::FrameSetup);
int SEHFrameOffset = 0;
- if (IsWinEH && HasFP) {
+ if (IsWin64Prologue && HasFP) {
SEHFrameOffset = calculateSetFPREG(NumBytes);
if (SEHFrameOffset)
addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
@@ -877,7 +917,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
else
BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr).addReg(StackPtr);
- if (NeedsWinEH)
+ if (NeedsWinCFI)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
.addImm(FramePtr)
.addImm(SEHFrameOffset)
@@ -888,7 +928,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
const MachineInstr *FrameInstr = &*MBBI;
++MBBI;
- if (NeedsWinEH) {
+ if (NeedsWinCFI) {
int FI;
if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
if (X86::FR64RegClass.contains(Reg)) {
@@ -904,32 +944,23 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
}
}
- if (NeedsWinEH)
+ if (NeedsWinCFI)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
.setMIFlag(MachineInstr::FrameSetup);
// Realign stack after we spilled callee-saved registers (so that we'll be
// able to calculate their offsets from the frame pointer).
// Win64 requires aligning the stack after the prologue.
- if (IsWinEH && RegInfo->needsStackRealignment(MF)) {
+ if (IsWin64Prologue && TRI->needsStackRealignment(MF)) {
assert(HasFP && "There should be a frame pointer if stack is realigned.");
- uint64_t Val = -MaxAlign;
- MachineInstr *MI =
- BuildMI(MBB, MBBI, DL, TII.get(getANDriOpcode(Uses64BitFramePtr, Val)),
- StackPtr)
- .addReg(StackPtr)
- .addImm(Val)
- .setMIFlag(MachineInstr::FrameSetup);
-
- // The EFLAGS implicit def is dead.
- MI->getOperand(3).setIsDead();
+ BuildStackAlignAND(MBB, MBBI, DL, MaxAlign);
}
// If we need a base pointer, set it up here. It's whatever the value
// of the stack pointer is at this point. Any variable size objects
// will be allocated after this, so we can still use the base pointer
// to reference locals.
- if (RegInfo->hasBasePointer(MF)) {
+ if (TRI->hasBasePointer(MF)) {
// Update the base pointer with the current stack pointer.
unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
@@ -950,12 +981,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF,
if (!HasFP && NumBytes) {
// Define the current CFA rule to use the provided offset.
assert(StackSize);
- unsigned CFIIndex = MMI.addFrameInst(
- MCCFIInstruction::createDefCfaOffset(nullptr,
- -StackSize + stackGrowth));
-
- BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
- .addCFIIndex(CFIIndex);
+ BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset(
+ nullptr, -StackSize + stackGrowth));
}
// Emit DWARF info specifying the offsets of the callee-saved registers.
@@ -975,65 +1002,24 @@ bool X86FrameLowering::canUseLEAForSPInEpilogue(
return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
}
-/// Check whether or not the terminators of \p MBB needs to read EFLAGS.
-static bool terminatorsNeedFlagsAsInput(const MachineBasicBlock &MBB) {
- for (const MachineInstr &MI : MBB.terminators()) {
- bool BreakNext = false;
- for (const MachineOperand &MO : MI.operands()) {
- if (!MO.isReg())
- continue;
- unsigned Reg = MO.getReg();
- if (Reg != X86::EFLAGS)
- continue;
-
- // This terminator needs an eflag that is not defined
- // by a previous terminator.
- if (!MO.isDef())
- return true;
- BreakNext = true;
- }
- if (BreakNext)
- break;
- }
- return false;
-}
-
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
- const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
- const X86RegisterInfo *RegInfo = STI.getRegisterInfo();
- const TargetInstrInfo &TII = *STI.getInstrInfo();
MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
DebugLoc DL;
if (MBBI != MBB.end())
DL = MBBI->getDebugLoc();
- bool Is64Bit = STI.is64Bit();
// standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
- const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
const bool Is64BitILP32 = STI.isTarget64BitILP32();
- unsigned SlotSize = RegInfo->getSlotSize();
- unsigned FramePtr = RegInfo->getFrameRegister(MF);
+ unsigned FramePtr = TRI->getFrameRegister(MF);
unsigned MachineFramePtr =
Is64BitILP32 ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
: FramePtr;
- unsigned StackPtr = RegInfo->getStackRegister();
-
- bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
- bool NeedsWinEH = IsWinEH && MF.getFunction()->needsUnwindTableEntry();
- bool UseLEAForSP = canUseLEAForSPInEpilogue(MF);
- // If we can use LEA for SP but we shouldn't, check that none
- // of the terminators uses the eflags. Otherwise we will insert
- // a ADD that will redefine the eflags and break the condition.
- // Alternatively, we could move the ADD, but this may not be possible
- // and is an optimization anyway.
- if (UseLEAForSP && !MF.getSubtarget<X86Subtarget>().useLeaForSP())
- UseLEAForSP = terminatorsNeedFlagsAsInput(MBB);
- // If that assert breaks, that means we do not do the right thing
- // in canUseAsEpilogue.
- assert((UseLEAForSP || !terminatorsNeedFlagsAsInput(MBB)) &&
- "We shouldn't have allowed this insertion point");
+
+ bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+ bool NeedsWinCFI =
+ IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
// Get the number of bytes to allocate from the FrameInfo.
uint64_t StackSize = MFI->getStackSize();
@@ -1048,7 +1034,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// Callee-saved registers were pushed on stack before the stack was
// realigned.
- if (RegInfo->needsStackRealignment(MF) && !IsWinEH)
+ if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);
// Pop EBP.
@@ -1083,11 +1069,12 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// If dynamic alloca is used, then reset esp to point to the last callee-saved
// slot before popping them off! Same applies for the case, when stack was
// realigned.
- if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
- if (RegInfo->needsStackRealignment(MF))
+ if (TRI->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
+ if (TRI->needsStackRealignment(MF))
MBBI = FirstCSPop;
unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
- uint64_t LEAAmount = IsWinEH ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
+ uint64_t LEAAmount =
+ IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
// There are only two legal forms of epilogue:
// - add SEHAllocationSize, %rsp
@@ -1109,8 +1096,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
}
} else if (NumBytes) {
// Adjust stack pointer back: ESP += numbytes.
- emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, Uses64BitFramePtr,
- UseLEAForSP, TII, *RegInfo);
+ emitSPUpdate(MBB, MBBI, NumBytes, /*InEpilogue=*/true);
--MBBI;
}
@@ -1120,7 +1106,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
// into the epilogue. To cope with that, we insert an epilogue marker here,
// then replace it with a 'nop' if it ends up immediately after a CALL in the
// final emitted code.
- if (NeedsWinEH)
+ if (NeedsWinCFI)
BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
// Add the return addr area delta back since we are not tail calling.
@@ -1130,16 +1116,13 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF,
MBBI = MBB.getFirstTerminator();
// Check for possible merge with preceding ADD instruction.
- Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
- emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, Uses64BitFramePtr,
- UseLEAForSP, TII, *RegInfo);
+ Offset += mergeSPUpdates(MBB, MBBI, true);
+ emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
}
}
int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
int FI) const {
- const X86RegisterInfo *RegInfo =
- MF.getSubtarget<X86Subtarget>().getRegisterInfo();
const MachineFrameInfo *MFI = MF.getFrameInfo();
// Offset will hold the offset from the stack pointer at function entry to the
// object.
@@ -1149,12 +1132,11 @@ int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
unsigned CSSize = X86FI->getCalleeSavedFrameSize();
uint64_t StackSize = MFI->getStackSize();
- unsigned SlotSize = RegInfo->getSlotSize();
bool HasFP = hasFP(MF);
- bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+ bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
int64_t FPDelta = 0;
- if (IsWinEH) {
+ if (IsWin64Prologue) {
assert(!MFI->hasCalls() || (StackSize % 16) == 8);
// Calculate required stack adjustment.
@@ -1178,7 +1160,7 @@ int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
}
- if (RegInfo->hasBasePointer(MF)) {
+ if (TRI->hasBasePointer(MF)) {
assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
if (FI < 0) {
// Skip the saved EBP.
@@ -1187,7 +1169,7 @@ int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
return Offset + StackSize;
}
- } else if (RegInfo->needsStackRealignment(MF)) {
+ } else if (TRI->needsStackRealignment(MF)) {
if (FI < 0) {
// Skip the saved EBP.
return Offset + SlotSize + FPDelta;
@@ -1214,17 +1196,15 @@ int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const {
- const X86RegisterInfo *RegInfo =
- MF.getSubtarget<X86Subtarget>().getRegisterInfo();
// We can't calculate offset from frame pointer if the stack is realigned,
// so enforce usage of stack/base pointer. The base pointer is used when we
// have dynamic allocas in addition to dynamic realignment.
- if (RegInfo->hasBasePointer(MF))
- FrameReg = RegInfo->getBaseRegister();
- else if (RegInfo->needsStackRealignment(MF))
- FrameReg = RegInfo->getStackRegister();
+ if (TRI->hasBasePointer(MF))
+ FrameReg = TRI->getBaseRegister();
+ else if (TRI->needsStackRealignment(MF))
+ FrameReg = TRI->getStackRegister();
else
- FrameReg = RegInfo->getFrameRegister(MF);
+ FrameReg = TRI->getFrameRegister(MF);
return getFrameIndexOffset(MF, FI);
}
@@ -1235,8 +1215,6 @@ int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF, int F
const uint64_t StackSize = MFI->getStackSize();
{
#ifndef NDEBUG
- const X86RegisterInfo *RegInfo =
- MF.getSubtarget<X86Subtarget>().getRegisterInfo();
// Note: LLVM arranges the stack as:
// Args > Saved RetPC (<--FP) > CSRs > dynamic alignment (<--BP)
// > "Stack Slots" (<--SP)
@@ -1248,7 +1226,7 @@ int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF, int F
// frame). As a result, THE RESULT OF THIS CALL IS MEANINGLESS FOR CSRs
// AND FixedObjects IFF needsStackRealignment or hasVarSizedObject.
- assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");
+ assert(!TRI->hasBasePointer(MF) && "we don't handle this case");
// We don't handle tail calls, and shouldn't be seeing them
// either.
@@ -1293,11 +1271,9 @@ int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF, int F
int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
int FI,
unsigned &FrameReg) const {
- const X86RegisterInfo *RegInfo =
- MF.getSubtarget<X86Subtarget>().getRegisterInfo();
- assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");
+ assert(!TRI->hasBasePointer(MF) && "we don't handle this case");
- FrameReg = RegInfo->getStackRegister();
+ FrameReg = TRI->getStackRegister();
return getFrameIndexOffsetFromSP(MF, FI);
}
@@ -1305,9 +1281,6 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
MachineFunction &MF, const TargetRegisterInfo *TRI,
std::vector<CalleeSavedInfo> &CSI) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
- const X86RegisterInfo *RegInfo =
- MF.getSubtarget<X86Subtarget>().getRegisterInfo();
- unsigned SlotSize = RegInfo->getSlotSize();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
unsigned CalleeSavedFrameSize = 0;
@@ -1321,7 +1294,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
// Since emitPrologue and emitEpilogue will handle spilling and restoring of
// the frame register, we can delete it from CSI list and not have to worry
// about avoiding it later.
- unsigned FPReg = RegInfo->getFrameRegister(MF);
+ unsigned FPReg = TRI->getFrameRegister(MF);
for (unsigned i = 0; i < CSI.size(); ++i) {
if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
CSI.erase(CSI.begin() + i);
@@ -1352,7 +1325,7 @@ bool X86FrameLowering::assignCalleeSavedSpillSlots(
if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
continue;
- const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
// ensure alignment
SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
// spill into slot
@@ -1372,10 +1345,6 @@ bool X86FrameLowering::spillCalleeSavedRegisters(
const TargetRegisterInfo *TRI) const {
DebugLoc DL = MBB.findDebugLoc(MI);
- MachineFunction &MF = *MBB.getParent();
- const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
- const TargetInstrInfo &TII = *STI.getInstrInfo();
-
// Push GPRs. It increases frame size.
unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
for (unsigned i = CSI.size(); i != 0; --i) {
@@ -1419,10 +1388,6 @@ bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
DebugLoc DL = MBB.findDebugLoc(MI);
- MachineFunction &MF = *MBB.getParent();
- const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
- const TargetInstrInfo &TII = *STI.getInstrInfo();
-
// Reload XMMs from stack frame.
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
@@ -1451,9 +1416,6 @@ void
X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
- const X86RegisterInfo *RegInfo =
- MF.getSubtarget<X86Subtarget>().getRegisterInfo();
- unsigned SlotSize = RegInfo->getSlotSize();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
@@ -1473,8 +1435,8 @@ X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
}
// Spill the BasePtr if it's used.
- if (RegInfo->hasBasePointer(MF))
- MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
+ if (TRI->hasBasePointer(MF))
+ MF.getRegInfo().setPhysRegUsed(TRI->getBaseRegister());
}
static bool
@@ -1532,11 +1494,7 @@ static const uint64_t kSplitStackAvailable = 256;
void X86FrameLowering::adjustForSegmentedStacks(
MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
- const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
- const TargetInstrInfo &TII = *STI.getInstrInfo();
uint64_t StackSize;
- bool Is64Bit = STI.is64Bit();
- const bool IsLP64 = STI.isTarget64BitLP64();
unsigned TlsReg, TlsOffset;
DebugLoc DL;
@@ -1782,12 +1740,7 @@ void X86FrameLowering::adjustForSegmentedStacks(
/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(
MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
- const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
- const TargetInstrInfo &TII = *STI.getInstrInfo();
MachineFrameInfo *MFI = MF.getFrameInfo();
- const unsigned SlotSize = STI.getRegisterInfo()->getSlotSize();
- const bool Is64Bit = STI.is64Bit();
- const bool IsLP64 = STI.isTarget64BitLP64();
DebugLoc DL;
// HiPE-specific values
const unsigned HipeLeafWords = 24;
@@ -1915,14 +1868,9 @@ void X86FrameLowering::adjustForHiPEPrologue(
void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
- const TargetInstrInfo &TII = *STI.getInstrInfo();
- const X86RegisterInfo &RegInfo = *STI.getRegisterInfo();
- unsigned StackPtr = RegInfo.getStackRegister();
bool reserveCallFrame = hasReservedCallFrame(MF);
unsigned Opcode = I->getOpcode();
bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
- bool IsLP64 = STI.isTarget64BitLP64();
DebugLoc DL = I->getDebugLoc();
uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
uint64_t InternalAmt = (isDestroy || Amount) ? I->getOperand(1).getImm() : 0;
@@ -1941,54 +1889,29 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
unsigned StackAlign = getStackAlignment();
Amount = RoundUpToAlignment(Amount, StackAlign);
- MachineInstr *New = nullptr;
-
// Factor out the amount that gets handled inside the sequence
// (Pushes of argument for frame setup, callee pops for frame destroy)
Amount -= InternalAmt;
if (Amount) {
- if (Opcode == TII.getCallFrameSetupOpcode()) {
- New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)), StackPtr)
- .addReg(StackPtr).addImm(Amount);
- } else {
- assert(Opcode == TII.getCallFrameDestroyOpcode());
-
- unsigned Opc = getADDriOpcode(IsLP64, Amount);
- New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
- .addReg(StackPtr).addImm(Amount);
- }
+  // Add Amount to SP to destroy a frame, and subtract it to set one up.
+ int Offset = isDestroy ? Amount : -Amount;
+ BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
}
-
- if (New) {
- // The EFLAGS implicit def is dead.
- New->getOperand(3).setIsDead();
-
- // Replace the pseudo instruction with a new instruction.
- MBB.insert(I, New);
- }
-
return;
}
- if (Opcode == TII.getCallFrameDestroyOpcode() && InternalAmt) {
+ if (isDestroy && InternalAmt) {
// If we are performing frame pointer elimination and if the callee pops
// something off the stack pointer, add it back. We do this until we have
// more advanced stack pointer tracking ability.
- unsigned Opc = getSUBriOpcode(IsLP64, InternalAmt);
- MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
- .addReg(StackPtr).addImm(InternalAmt);
-
- // The EFLAGS implicit def is dead.
- New->getOperand(3).setIsDead();
-
// We are not tracking the stack pointer adjustment by the callee, so make
// sure we restore the stack pointer immediately after the call, there may
// be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
MachineBasicBlock::iterator B = MBB.begin();
while (I != B && !std::prev(I)->isCall())
--I;
- MBB.insert(I, New);
+ BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
}
}
diff --git a/lib/Target/X86/X86FrameLowering.h b/lib/Target/X86/X86FrameLowering.h
index 5d03b4db45c1..2858e86cd0e0 100644
--- a/lib/Target/X86/X86FrameLowering.h
+++ b/lib/Target/X86/X86FrameLowering.h
@@ -18,16 +18,40 @@
namespace llvm {
+class MachineInstrBuilder;
+class MCCFIInstruction;
+class X86Subtarget;
+class X86RegisterInfo;
+
class X86FrameLowering : public TargetFrameLowering {
public:
- explicit X86FrameLowering(StackDirection D, unsigned StackAl, int LAO)
- : TargetFrameLowering(StackGrowsDown, StackAl, LAO) {}
+ X86FrameLowering(const X86Subtarget &STI, unsigned StackAlignOverride);
+
+ // Cached subtarget predicates.
+
+ const X86Subtarget &STI;
+ const TargetInstrInfo &TII;
+ const X86RegisterInfo *TRI;
+
+ unsigned SlotSize;
+
+ /// Is64Bit implies that x86_64 instructions are available.
+ bool Is64Bit;
+
+ bool IsLP64;
+
+ /// True if the 64-bit frame or stack pointer should be used. True for most
+ /// 64-bit targets with the exception of x32. If this is false, 32-bit
+ /// instruction operands should be used to manipulate StackPtr and FramePtr.
+ bool Uses64BitFramePtr;
+
+ unsigned StackPtr;
/// Emit a call to the target's stack probe function. This is required for all
/// large stack allocations on Windows. The caller is required to materialize
/// the number of bytes to probe in RAX/EAX.
- static void emitStackProbeCall(MachineFunction &MF, MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL);
+ void emitStackProbeCall(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL) const;
void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
@@ -83,18 +107,13 @@ public:
/// it is an ADD/SUB/LEA instruction, it is deleted and the
/// stack adjustment is returned as a positive value for ADD/LEA and
/// a negative for SUB.
- static int mergeSPUpdates(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI,
- unsigned StackPtr, bool doMergeWithPrevious);
+ int mergeSPUpdates(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ bool doMergeWithPrevious) const;
/// Emit a series of instructions to increment / decrement the stack
/// pointer by a constant value.
- static void emitSPUpdate(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MBBI, unsigned StackPtr,
- int64_t NumBytes, bool Is64BitTarget,
- bool Is64BitStackPtr, bool UseLEA,
- const TargetInstrInfo &TII,
- const TargetRegisterInfo &TRI);
+ void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ int64_t NumBytes, bool InEpilogue) const;
/// Check that LEA can be used on SP in an epilogue sequence for \p MF.
bool canUseLEAForSPInEpilogue(const MachineFunction &MF) const;
@@ -115,8 +134,25 @@ private:
MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
uint64_t Amount) const;
+
+ uint64_t calculateMaxStackAlign(const MachineFunction &MF) const;
+
+ /// Wraps up getting a CFI index and building a MachineInstr for it.
+ void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, MCCFIInstruction CFIInst) const;
+
+ /// Aligns the stack pointer by ANDing it with -MaxAlign.
+ void BuildStackAlignAND(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ uint64_t MaxAlign) const;
+
+ /// Adjusts the stack pointer using LEA, SUB, or ADD.
+ MachineInstrBuilder BuildStackAdjustment(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, int64_t Offset,
+ bool InEpilogue) const;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index de591091f1ae..f6785e161188 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -138,7 +138,7 @@ namespace {
}
#endif
};
-}
+} // namespace
namespace {
//===--------------------------------------------------------------------===//
@@ -310,7 +310,7 @@ namespace {
return true;
}
};
-}
+} // namespace
bool
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index e3ec288a683e..ce1ca20ee81a 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -915,6 +915,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
+ setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
+
setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
// As there is no 64-bit GPR available, we need build a special custom
@@ -2233,7 +2235,9 @@ static bool IsCCallConvention(CallingConv::ID CC) {
}
bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
- if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
+ auto Attr =
+ CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
+ if (!CI->isTailCall() || Attr.getValueAsString() == "true")
return false;
CallSite CS(CI);
@@ -2762,8 +2766,9 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
StructReturnType SR = callIsStructReturn(Outs);
bool IsSibcall = false;
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
+ auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
- if (MF.getTarget().Options.DisableTailCalls)
+ if (Attr.getValueAsString() == "true")
isTailCall = false;
if (Subtarget->isPICStyleGOT() &&
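
These hunks replace the global TargetOptions::DisableTailCalls flag with a per-function "disable-tail-calls" string attribute, so the setting travels with each function. A standalone sketch of the lookup; FunctionSketch and mayEmitTailCall are illustrative stand-ins for the real Function/attribute API:

    #include <map>
    #include <string>

    // Stand-in for an LLVM Function's string attributes: each function can
    // now carry its own tail-call setting (e.g. under LTO).
    struct FunctionSketch {
      std::map<std::string, std::string> Attrs;
      std::string getFnAttribute(const std::string &Kind) const {
        auto It = Attrs.find(Kind);
        return It == Attrs.end() ? std::string() : It->second;
      }
    };

    static bool mayEmitTailCall(const FunctionSketch &F, bool IsTailCall) {
      return IsTailCall && F.getFnAttribute("disable-tail-calls") != "true";
    }

    int main() {
      FunctionSketch F;
      F.Attrs["disable-tail-calls"] = "true";
      return mayEmitTailCall(F, /*IsTailCall=*/true) ? 1 : 0; // returns 0
    }
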
@@ -5441,7 +5446,7 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
///
/// Otherwise, the first horizontal binop dag node takes as input the lower
/// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
-/// dag node takes the the upper 128-bit of V0 and the upper 128-bit of V1.
+/// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
/// Example:
/// HADD V0_LO, V1_LO
/// HADD V0_HI, V1_HI
@@ -6353,7 +6358,7 @@ static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
///
/// This helper function produces an 8-bit shuffle immediate corresponding to
/// the ubiquitous shuffle encoding scheme used in x86 instructions for
-/// shuffling 8 lanes.
+/// shuffling 8 lanes.
static SDValue get1bitLaneShuffleImm8ForMask(ArrayRef<int> Mask, SDLoc DL,
SelectionDAG &DAG) {
assert(Mask.size() <= 8 &&
@@ -9380,6 +9385,30 @@ static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
DAG.getConstant(PermMask, DL, MVT::i8));
}
+/// \brief Handle lowering 4-lane 128-bit shuffles.
+static SDValue lowerV4X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
+ SDValue V2, ArrayRef<int> WidenedMask,
+ SelectionDAG &DAG) {
+
+ assert(WidenedMask.size() == 4 && "Unexpected mask size for 128bit shuffle!");
+  // Form a 128-bit permutation: convert the 64-bit shuffle mask selection
+  // values into 128-bit selection bits defined by a vshuf64x2 instruction's
+  // immediate control byte.
+ unsigned PermMask = 0, Imm = 0;
+
+ for (int i = 0, Size = WidenedMask.size(); i < Size; ++i) {
+    if (WidenedMask[i] == SM_SentinelZero)
+ return SDValue();
+
+      // Use the first element in place of an undef mask.
+ Imm = (WidenedMask[i] == SM_SentinelUndef) ? 0 : WidenedMask[i];
+ PermMask |= (Imm % 4) << (i * 2);
+ }
+
+ return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
+ DAG.getConstant(PermMask, DL, MVT::i8));
+}
+
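
The immediate assembled above packs one 2-bit source-lane selector per 128-bit destination lane, little-endian, which is vshuf64x2's control-byte encoding; undef lanes default to lane 0. A worked, compilable example of the encoding, where encodePermMask is an illustrative helper and negative values stand in for the sentinels:

    #include <cassert>
    #include <cstdio>

    static unsigned encodePermMask(const int WidenedMask[4]) {
      unsigned PermMask = 0;
      for (int i = 0; i < 4; ++i) {
        int Lane = WidenedMask[i] < 0 ? 0 : WidenedMask[i]; // undef -> lane 0
        PermMask |= (Lane % 4) << (i * 2); // 2 bits per destination lane
      }
      return PermMask;
    }

    int main() {
      // Take lanes 0,1 from V1 and lanes 0,1 from V2 (indices 4,5 select V2).
      const int Mask[4] = {0, 1, 4, 5};
      unsigned Imm = encodePermMask(Mask);
      std::printf("imm = 0x%02x\n", Imm); // 0b01'00'01'00 == 0x44
      assert(Imm == 0x44);
    }
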
/// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
/// shuffling each lane.
///
@@ -10173,6 +10202,10 @@ static SDValue lowerV8X64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
+ SmallVector<int, 4> WidenedMask;
+ if (canWidenShuffleElements(Mask, WidenedMask))
+    if (SDValue Op = lowerV4X128VectorShuffle(DL, VT, V1, V2, WidenedMask, DAG))
+ return Op;
// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 2, 10, 4, 12, 6, 14}))
@@ -11023,9 +11056,8 @@ static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
if (Idx2->getZExtValue() == 0) {
SDValue Ops[] = { SubVec2, SubVec };
- SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
- if (LD.getNode())
- return LD;
+ if (SDValue Ld = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false))
+ return Ld;
}
}
}
@@ -11617,15 +11649,21 @@ static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
- MVT SrcVT = Op.getOperand(0).getSimpleValueType();
+ SDValue Src = Op.getOperand(0);
+ MVT SrcVT = Src.getSimpleValueType();
+ MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
if (SrcVT.isVector()) {
+ if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
+ return DAG.getNode(X86ISD::CVTDQ2PD, dl, VT,
+ DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
+ DAG.getUNDEF(SrcVT)));
+ }
if (SrcVT.getVectorElementType() == MVT::i1) {
MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
- DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
- Op.getOperand(0)));
+ DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT, Src));
}
return SDValue();
}
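
For v2i32 -> v2f64, the source is widened to v4i32 by concatenating with undef and handed to CVTDQ2PD, which reads only the low two lanes, so the padding lanes never matter. A scalar model of that contract:

    #include <cassert>
    #include <cstdint>

    // CVTDQ2PD semantics for this lowering: convert the low two i32 lanes
    // of the widened source; lanes 2-3 are ignored.
    static void cvtdq2pd(const int32_t Src[4], double Dst[2]) {
      Dst[0] = static_cast<double>(Src[0]); // lane 0
      Dst[1] = static_cast<double>(Src[1]); // lane 1
    }

    int main() {
      int32_t Widened[4] = {-7, 42, /*undef*/ 0, /*undef*/ 0};
      double D[2];
      cvtdq2pd(Widened, D);
      assert(D[0] == -7.0 && D[1] == 42.0);
      return 0;
    }
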
@@ -13018,11 +13056,11 @@ SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
RecipOp = "vec-sqrtf";
else
return SDValue();
-
+
TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
if (!Recips.isEnabled(RecipOp))
return SDValue();
-
+
RefinementSteps = Recips.getRefinementSteps(RecipOp);
UseOneConstNR = false;
return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
@@ -13035,7 +13073,7 @@ SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
unsigned &RefinementSteps) const {
EVT VT = Op.getValueType();
const char *RecipOp;
-
+
// SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
// TODO: Add support for AVX512 (v16f32).
// It is likely not profitable to do this for f64 because a double-precision
@@ -13050,7 +13088,7 @@ SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
RecipOp = "vec-divf";
else
return SDValue();
-
+
TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
if (!Recips.isEnabled(RecipOp))
return SDValue();
@@ -13236,13 +13274,13 @@ static SDValue LowerBoolVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
DAG.getConstant(-1, dl, VT));
switch (SetCCOpcode) {
default: llvm_unreachable("Unexpected SETCC condition");
- case ISD::SETNE:
- // (x != y) -> ~(x ^ y)
+ case ISD::SETEQ:
+ // (x == y) -> ~(x ^ y)
return DAG.getNode(ISD::XOR, dl, VT,
DAG.getNode(ISD::XOR, dl, VT, Op0, Op1),
DAG.getConstant(-1, dl, VT));
- case ISD::SETEQ:
- // (x == y) -> (x ^ y)
+ case ISD::SETNE:
+ // (x != y) -> (x ^ y)
return DAG.getNode(ISD::XOR, dl, VT, Op0, Op1);
case ISD::SETUGT:
case ISD::SETGT:
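
This swap fixes inverted comparison results for i1 vectors: per lane, x ^ y is 1 exactly when the lanes differ, so XOR implements SETNE and its complement implements SETEQ, not the other way around. A quick exhaustive check over eight 1-bit lanes:

    #include <cassert>
    #include <cstdint>

    // Per-lane identity the corrected cases rely on:
    // (x != y) == (x ^ y) and (x == y) == ~(x ^ y).
    int main() {
      uint8_t X = 0b10110100, Y = 0b11010110;
      uint8_t Ne = X ^ Y;                          // lanes that differ
      uint8_t Eq = static_cast<uint8_t>(~(X ^ Y)); // lanes that match
      for (int i = 0; i < 8; ++i) {
        bool x = (X >> i) & 1, y = (Y >> i) & 1;
        assert(((Ne >> i) & 1) == (x != y));
        assert(((Eq >> i) & 1) == (x == y));
      }
      return 0;
    }
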
@@ -15107,7 +15145,7 @@ static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
unsigned Round = cast<ConstantSDNode>(RoundingMode)->getZExtValue();
- if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION)
+ if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION)
return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(), Src, RoundingMode),
Mask, PassThru, Subtarget, DAG);
@@ -15687,14 +15725,49 @@ static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
return DAG.getMergeValues(Results, DL);
}
+static SDValue LowerEXCEPTIONINFO(SDValue Op, const X86Subtarget *Subtarget,
+ SelectionDAG &DAG) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ SDLoc dl(Op);
+ SDValue FnOp = Op.getOperand(2);
+ SDValue FPOp = Op.getOperand(3);
+
+ // Compute the symbol for the parent EH registration. We know it'll get
+ // emitted later.
+ auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(FnOp)->getGlobal());
+ MCSymbol *ParentFrameSym =
+ MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
+ GlobalValue::getRealLinkageName(Fn->getName()));
+ StringRef Name = ParentFrameSym->getName();
+ assert(Name.data()[Name.size()] == '\0' && "not null terminated");
+
+ // Create a TargetExternalSymbol for the label to avoid any target lowering
+ // that would make this PC relative.
+ MVT PtrVT = Op.getSimpleValueType();
+ SDValue OffsetSym = DAG.getTargetExternalSymbol(Name.data(), PtrVT);
+ SDValue OffsetVal =
+ DAG.getNode(ISD::FRAME_ALLOC_RECOVER, dl, PtrVT, OffsetSym);
+
+ // Add the offset to the FP.
+ SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, FPOp, OffsetVal);
+
+ // Load the second field of the struct, which is 4 bytes in. See
+ // WinEHStatePass for more info.
+ Add = DAG.getNode(ISD::ADD, dl, PtrVT, Add, DAG.getConstant(4, dl, PtrVT));
+ return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Add, MachinePointerInfo(),
+ false, false, false, 0);
+}
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
SelectionDAG &DAG) {
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
- if (!IntrData)
+ if (!IntrData) {
+ if (IntNo == Intrinsic::x86_seh_exceptioninfo)
+ return LowerEXCEPTIONINFO(Op, Subtarget, DAG);
return SDValue();
+ }
SDLoc dl(Op);
switch(IntrData->Type) {
@@ -16464,6 +16537,8 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
+ SDValue AhiBlo = Ahi;
+ SDValue AloBhi = Bhi;
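+ // Seed the cross products with the shifted halves; when a half is known
+ // all-zero the PMULUDQ below is skipped and the zero vector stands in.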
// Bit cast to 32-bit vectors for MULUDQ
EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
(VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
@@ -16473,11 +16548,15 @@ static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
Bhi = DAG.getBitcast(MulVT, Bhi);
SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
- SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
- SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
-
- AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
- AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
+ // After shifting right by 32 bits, a constant operand's high half may be
+ // all zeros, in which case the corresponding partial product is skipped.
+ if (!ISD::isBuildVectorAllZeros(Ahi.getNode())) {
+ AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
+ AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
+ }
+ if (!ISD::isBuildVectorAllZeros(Bhi.getNode())) {
+ AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
+ AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
+ }
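+ // The full product mod 2^64 is AloBlo + ((AloBhi + AhiBlo) << 32); the
+ // Ahi*Bhi term lands entirely above bit 63 and vanishes.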
SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
@@ -16992,36 +17071,111 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
}
}
- if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
- // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
- Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, dl, VT));
-
- SDValue VSelM = DAG.getConstant(0x80, dl, VT);
- SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
- OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
-
- // r = VSELECT(r, shl(r, 4), a);
- SDValue M = DAG.getNode(ISD::SHL, dl, VT, R, DAG.getConstant(4, dl, VT));
- R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
-
- // a += a
- Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
- OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
- OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
+ if (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget->hasInt256())) {
+ MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
+ unsigned ShiftOpcode = Op->getOpcode();
- // r = VSELECT(r, shl(r, 2), a);
- M = DAG.getNode(ISD::SHL, dl, VT, R, DAG.getConstant(2, dl, VT));
- R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
+ auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
+ // On SSE41 targets we make use of the fact that VSELECT lowers
+ // to PBLENDVB which selects bytes based just on the sign bit.
+ if (Subtarget->hasSSE41()) {
+ V0 = DAG.getBitcast(VT, V0);
+ V1 = DAG.getBitcast(VT, V1);
+ Sel = DAG.getBitcast(VT, Sel);
+ return DAG.getBitcast(SelVT,
+ DAG.getNode(ISD::VSELECT, dl, VT, Sel, V0, V1));
+ }
+ // On pre-SSE41 targets we test for the sign bit by comparing to
+ // zero - a negative value will set all bits of the lanes to true
+ // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
+ SDValue Z = getZeroVector(SelVT, Subtarget, DAG, dl);
+ SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
+ return DAG.getNode(ISD::VSELECT, dl, SelVT, C, V0, V1);
+ };
- // a += a
- Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
- OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
- OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
+ // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
+ // We can safely do this using i16 shifts as we're only interested in
+ // the 3 lower bits of each byte.
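+ // Each select step below then tests one amount bit in the sign position;
+ // e.g. an amount of 5 (0b101) takes the shift-by-4 and shift-by-1 steps.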
+ Amt = DAG.getBitcast(ExtVT, Amt);
+ Amt = DAG.getNode(ISD::SHL, dl, ExtVT, Amt, DAG.getConstant(5, dl, ExtVT));
+ Amt = DAG.getBitcast(VT, Amt);
+
+ if (Op->getOpcode() == ISD::SHL || Op->getOpcode() == ISD::SRL) {
+ // r = VSELECT(r, shift(r, 4), a);
+ SDValue M =
+ DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
+ R = SignBitSelect(VT, Amt, M, R);
+
+ // a += a
+ Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+
+ // r = VSELECT(r, shift(r, 2), a);
+ M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
+ R = SignBitSelect(VT, Amt, M, R);
+
+ // a += a
+ Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+
+ // return VSELECT(r, shift(r, 1), a);
+ M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
+ R = SignBitSelect(VT, Amt, M, R);
+ return R;
+ }
- // return VSELECT(r, r+r, a);
- R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
- DAG.getNode(ISD::ADD, dl, VT, R, R), R);
- return R;
+ if (Op->getOpcode() == ISD::SRA) {
+ // For SRA we need to unpack each byte to the higher byte of an i16 vector
+ // so we can correctly sign extend. We don't care what happens to the
+ // lower byte.
+ SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), Amt);
+ SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), Amt);
+ SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), R);
+ SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), R);
+ ALo = DAG.getBitcast(ExtVT, ALo);
+ AHi = DAG.getBitcast(ExtVT, AHi);
+ RLo = DAG.getBitcast(ExtVT, RLo);
+ RHi = DAG.getBitcast(ExtVT, RHi);
+
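+ // With each byte in the high half of an i16 lane, the i16 shifts below
+ // pull in that byte's own sign bits, giving a correct per-byte SRA.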
+ // r = VSELECT(r, shift(r, 4), a);
+ SDValue MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
+ DAG.getConstant(4, dl, ExtVT));
+ SDValue MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
+ DAG.getConstant(4, dl, ExtVT));
+ RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
+ RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
+
+ // a += a
+ ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
+ AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
+
+ // r = VSELECT(r, shift(r, 2), a);
+ MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
+ DAG.getConstant(2, dl, ExtVT));
+ MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
+ DAG.getConstant(2, dl, ExtVT));
+ RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
+ RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
+
+ // a += a
+ ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
+ AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
+
+ // r = VSELECT(r, shift(r, 1), a);
+ MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
+ DAG.getConstant(1, dl, ExtVT));
+ MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
+ DAG.getConstant(1, dl, ExtVT));
+ RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
+ RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
+
+ // Logical shift the result back to the lower byte, leaving a zero upper
+ // byte, meaning that we can safely pack with PACKUSWB.
+ RLo =
+ DAG.getNode(ISD::SRL, dl, ExtVT, RLo, DAG.getConstant(8, dl, ExtVT));
+ RHi =
+ DAG.getNode(ISD::SRL, dl, ExtVT, RHi, DAG.getConstant(8, dl, ExtVT));
+ return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
+ }
}
// It's worth extending once and using the v8i32 shifts for 16-bit types, but
@@ -17055,6 +17209,67 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
}
+ if (VT == MVT::v8i16) {
+ unsigned ShiftOpcode = Op->getOpcode();
+
+ auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
+ // On SSE41 targets we make use of the fact that VSELECT lowers
+ // to PBLENDVB which selects bytes based just on the sign bit.
+ if (Subtarget->hasSSE41()) {
+ MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
+ V0 = DAG.getBitcast(ExtVT, V0);
+ V1 = DAG.getBitcast(ExtVT, V1);
+ Sel = DAG.getBitcast(ExtVT, Sel);
+ return DAG.getBitcast(
+ VT, DAG.getNode(ISD::VSELECT, dl, ExtVT, Sel, V0, V1));
+ }
+ // On pre-SSE41 targets we splat the sign bit - a negative value will
+ // set all bits of the lanes to true and VSELECT uses that in
+ // its OR(AND(V0,C),AND(V1,~C)) lowering.
+ SDValue C =
+ DAG.getNode(ISD::SRA, dl, VT, Sel, DAG.getConstant(15, dl, VT));
+ return DAG.getNode(ISD::VSELECT, dl, VT, C, V0, V1);
+ };
+
+ // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
+ if (Subtarget->hasSSE41()) {
+ // On SSE41 targets we need to replicate the shift mask in both
+ // bytes for PBLENDVB.
+ Amt = DAG.getNode(
+ ISD::OR, dl, VT,
+ DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(4, dl, VT)),
+ DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT)));
+ } else {
+ Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT));
+ }
+
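+ // Only the low 4 bits of each i16 amount matter, so each select step
+ // below tests one amount bit: first 8, then 4, 2 and finally 1.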
+ // r = VSELECT(r, shift(r, 8), a);
+ SDValue M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(8, dl, VT));
+ R = SignBitSelect(Amt, M, R);
+
+ // a += a
+ Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+
+ // r = VSELECT(r, shift(r, 4), a);
+ M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
+ R = SignBitSelect(Amt, M, R);
+
+ // a += a
+ Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+
+ // r = VSELECT(r, shift(r, 2), a);
+ M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
+ R = SignBitSelect(Amt, M, R);
+
+ // a += a
+ Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
+
+ // return VSELECT(r, shift(r, 1), a);
+ M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
+ R = SignBitSelect(Amt, M, R);
+ return R;
+ }
+
// Decompose 256-bit shifts into smaller 128-bit shifts.
if (VT.is256BitVector()) {
unsigned NumElems = VT.getVectorNumElements();
@@ -18290,6 +18505,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::VINSERT: return "X86ISD::VINSERT";
case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
+ case X86ISD::CVTDQ2PD: return "X86ISD::CVTDQ2PD";
case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
case X86ISD::VSHL: return "X86ISD::VSHL";
@@ -18404,6 +18620,9 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::FGETEXP_RND: return "X86ISD::FGETEXP_RND";
case X86ISD::ADDS: return "X86ISD::ADDS";
case X86ISD::SUBS: return "X86ISD::SUBS";
+ case X86ISD::AVG: return "X86ISD::AVG";
+ case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND";
+ case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND";
}
return nullptr;
}
@@ -19464,7 +19683,8 @@ X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
assert(!Subtarget->isTargetMachO());
- X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
+ Subtarget->getFrameLowering()->emitStackProbeCall(*BB->getParent(), *BB, MI,
+ DL);
MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
@@ -24019,7 +24239,7 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT SVT = VT.getScalarType();
- EVT InVT = N0->getValueType(0);
+ EVT InVT = N0.getValueType();
EVT InSVT = InVT.getScalarType();
SDLoc DL(N);
@@ -24037,7 +24257,7 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
}
if (!DCI.isBeforeLegalizeOps()) {
- if (N0.getValueType() == MVT::i1) {
+ if (InVT == MVT::i1) {
SDValue Zero = DAG.getConstant(0, DL, VT);
SDValue AllOnes =
DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), DL, VT);
@@ -24048,7 +24268,7 @@ static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
if (VT.isVector()) {
auto ExtendToVec128 = [&DAG](SDLoc DL, SDValue N) {
- EVT InVT = N->getValueType(0);
+ EVT InVT = N.getValueType();
EVT OutVT = EVT::getVectorVT(*DAG.getContext(), InVT.getScalarType(),
128 / InVT.getScalarSizeInBits());
SmallVector<SDValue, 8> Opnds(128 / InVT.getSizeInBits(),
@@ -24470,18 +24690,19 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
// First try to optimize away the conversion entirely when it's
// conditionally from a constant. Vectors only.
- SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
- if (Res != SDValue())
+ if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
return Res;
// Now move on to more general possibilities.
SDValue Op0 = N->getOperand(0);
EVT InVT = Op0->getValueType(0);
- // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
- if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
+ // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
+ // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
+ if (InVT == MVT::v8i8 || InVT == MVT::v4i8 ||
+ InVT == MVT::v8i16 || InVT == MVT::v4i16) {
SDLoc dl(N);
- MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
+ MVT DstVT = MVT::getVectorVT(MVT::i32, InVT.getVectorNumElements());
SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
}
@@ -24490,7 +24711,7 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
// a 32-bit target where SSE doesn't support i64->FP operations.
if (Op0.getOpcode() == ISD::LOAD) {
LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
- EVT VT = Ld->getValueType(0);
+ EVT LdVT = Ld->getValueType(0);
// This transformation is not supported if the result type is f16
if (N->getValueType(0) == MVT::f16)
@@ -24498,9 +24719,9 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
- !Subtarget->is64Bit() && VT == MVT::i64) {
+ !Subtarget->is64Bit() && LdVT == MVT::i64) {
SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
- SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
+ SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
return FILDChain;
}
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index b5d062f72b24..9c98333776cf 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -218,7 +218,8 @@ namespace llvm {
// Integer add/sub with signed saturation.
ADDS,
SUBS,
-
+ // Unsigned integer average.
+ AVG,
/// Integer horizontal add.
HADD,
@@ -293,6 +294,9 @@ namespace llvm {
// Vector FP round.
VFPROUND,
+ // Vector signed integer to double.
+ CVTDQ2PD,
+
// 128-bit vector logical left / right shift
VSHLDQ, VSRLDQ,
@@ -417,6 +421,10 @@ namespace llvm {
COMPRESS,
EXPAND,
+ // Convert signed/unsigned integer to scalar floating-point value
+ // with rounding mode.
+ SINT_TO_FP_RND,
+ UINT_TO_FP_RND,
// Save xmm argument registers to the stack, according to %al. An operator
// is needed so that this can be expanded with control flow.
VASTART_SAVE_XMM_REGS,
@@ -508,7 +516,7 @@ namespace llvm {
// have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
// thought as target memory ops!
};
- }
+ } // namespace X86ISD
/// Define some predicates that are used for node matching.
namespace X86 {
@@ -575,7 +583,7 @@ namespace llvm {
TO_ZERO = 3,
CUR_DIRECTION = 4
};
- }
+ } // namespace X86
//===--------------------------------------------------------------------===//
// X86 Implementation of the TargetLowering interface
@@ -1112,6 +1120,6 @@ namespace llvm {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
}
-}
+} // namespace llvm
#endif // X86ISELLOWERING_H
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index c1d0aef07118..de6a83506b28 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -1058,118 +1058,87 @@ def : Pat<(v8i64 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
(VPERMILPDZri VR512:$src1, imm:$imm)>;
// -- VPERM2I - 3 source operands form --
-multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
- PatFrag mem_frag, X86MemOperand x86memop,
- SDNode OpNode, ValueType OpVT, RegisterClass KRC> {
+multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, X86VectorVTInfo _> {
let Constraints = "$src1 = $dst" in {
- def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, RC:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set RC:$dst,
- (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
- EVEX_4V;
-
- def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst {${mask}}|"
- "$dst {${mask}}, $src2, $src3}"),
- [(set RC:$dst, (OpVT (vselect KRC:$mask,
- (OpNode RC:$src1, RC:$src2,
- RC:$src3),
- RC:$src1)))]>,
- EVEX_4V, EVEX_K;
-
- let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
- def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
- (ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst {${mask}} {z} |",
- "$dst {${mask}} {z}, $src2, $src3}"),
- [(set RC:$dst, (OpVT (vselect KRC:$mask,
- (OpNode RC:$src1, RC:$src2,
- RC:$src3),
- (OpVT (bitconvert
- (v16i32 immAllZerosV))))))]>,
- EVEX_4V, EVEX_KZ;
+ defm rr: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.RC:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>, EVEX_4V,
+ AVX5128IBase;
- def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, RC:$src2, x86memop:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
- [(set RC:$dst,
- (OpVT (OpNode RC:$src1, RC:$src2,
- (mem_frag addr:$src3))))]>, EVEX_4V;
+ let mayLoad = 1 in
+ defm rm: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.MemOp:$src3),
+ OpcodeStr, "$src3, $src2", "$src2, $src3",
+ (_.VT (OpNode _.RC:$src1, _.RC:$src2,
+ (_.VT (bitconvert (_.LdFrag addr:$src3)))))>,
+ EVEX_4V, AVX5128IBase;
+ }
+}
+multiclass avx512_perm_3src_mb<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, X86VectorVTInfo _> {
+ let mayLoad = 1, Constraints = "$src1 = $dst" in
+ defm rmb: AVX512_maskable_3src<opc, MRMSrcMem, _, (outs _.RC:$dst),
+ (ins _.RC:$src2, _.ScalarMemOp:$src3),
+ OpcodeStr, !strconcat("${src3}", _.BroadcastStr, ", $src2"),
+ !strconcat("$src2, ${src3}", _.BroadcastStr),
+ (_.VT (OpNode _.RC:$src1,
+ _.RC:$src2,(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3)))))>,
+ AVX5128IBase, EVEX_4V, EVEX_B;
+}
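+// The _mb variant adds the EVEX embedded-broadcast form, in which a single
+// scalar memory element is splatted across the vector before the permute.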
- def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst {${mask}}|"
- "$dst {${mask}}, $src2, $src3}"),
- [(set RC:$dst,
- (OpVT (vselect KRC:$mask,
- (OpNode RC:$src1, RC:$src2,
- (mem_frag addr:$src3)),
- RC:$src1)))]>,
- EVEX_4V, EVEX_K;
-
- let AddedComplexity = 10 in // Prefer over the rrkz variant
- def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
- (ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
- !strconcat(OpcodeStr,
- "\t{$src3, $src2, $dst {${mask}} {z}|"
- "$dst {${mask}} {z}, $src2, $src3}"),
- [(set RC:$dst,
- (OpVT (vselect KRC:$mask,
- (OpNode RC:$src1, RC:$src2,
- (mem_frag addr:$src3)),
- (OpVT (bitconvert
- (v16i32 immAllZerosV))))))]>,
- EVEX_4V, EVEX_KZ;
+multiclass avx512_perm_3src_sizes<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, AVX512VLVectorVTInfo VTInfo> {
+ let Predicates = [HasAVX512] in
+ defm NAME: avx512_perm_3src<opc, OpcodeStr, OpNode, VTInfo.info512>,
+ avx512_perm_3src_mb<opc, OpcodeStr, OpNode, VTInfo.info512>, EVEX_V512;
+ let Predicates = [HasVLX] in {
+ defm NAME#128: avx512_perm_3src<opc, OpcodeStr, OpNode, VTInfo.info128>,
+ avx512_perm_3src_mb<opc, OpcodeStr, OpNode, VTInfo.info128>,
+ EVEX_V128;
+ defm NAME#256: avx512_perm_3src<opc, OpcodeStr, OpNode, VTInfo.info256>,
+ avx512_perm_3src_mb<opc, OpcodeStr, OpNode, VTInfo.info256>,
+ EVEX_V256;
}
}
-defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, loadv16i32,
- i512mem, X86VPermiv3, v16i32, VK16WM>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, loadv8i64,
- i512mem, X86VPermiv3, v8i64, VK8WM>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, loadv16f32,
- i512mem, X86VPermiv3, v16f32, VK16WM>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, loadv8f64,
- i512mem, X86VPermiv3, v8f64, VK8WM>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-
-multiclass avx512_perm_table_3src<bits<8> opc, string Suffix, RegisterClass RC,
- PatFrag mem_frag, X86MemOperand x86memop,
- SDNode OpNode, ValueType OpVT, RegisterClass KRC,
- ValueType MaskVT, RegisterClass MRC> :
- avx512_perm_3src<opc, "vpermt2"##Suffix, RC, mem_frag, x86memop, OpNode,
- OpVT, KRC> {
- def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
- VR512:$idx, VR512:$src1, VR512:$src2, -1)),
- (!cast<Instruction>(NAME#rr) VR512:$src1, VR512:$idx, VR512:$src2)>;
-
- def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
- VR512:$idx, VR512:$src1, VR512:$src2, MRC:$mask)),
- (!cast<Instruction>(NAME#rrk) VR512:$src1,
- (MaskVT (COPY_TO_REGCLASS MRC:$mask, KRC)), VR512:$idx, VR512:$src2)>;
-}
-
-defm VPERMT2D : avx512_perm_table_3src<0x7E, "d", VR512, loadv16i32, i512mem,
- X86VPermv3, v16i32, VK16WM, v16i1, GR16>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPERMT2Q : avx512_perm_table_3src<0x7E, "q", VR512, loadv8i64, i512mem,
- X86VPermv3, v8i64, VK8WM, v8i1, GR8>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
-defm VPERMT2PS : avx512_perm_table_3src<0x7F, "ps", VR512, loadv16f32, i512mem,
- X86VPermv3, v16f32, VK16WM, v16i1, GR16>,
- EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPERMT2PD : avx512_perm_table_3src<0x7F, "pd", VR512, loadv8f64, i512mem,
- X86VPermv3, v8f64, VK8WM, v8i1, GR8>,
- EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
+multiclass avx512_perm_3src_sizes_w<bits<8> opc, string OpcodeStr,
+ SDNode OpNode, AVX512VLVectorVTInfo VTInfo> {
+ let Predicates = [HasBWI] in
+ defm NAME: avx512_perm_3src<opc, OpcodeStr, OpNode, VTInfo.info512>,
+ avx512_perm_3src_mb<opc, OpcodeStr, OpNode, VTInfo.info512>,
+ EVEX_V512;
+ let Predicates = [HasBWI, HasVLX] in {
+ defm NAME#128: avx512_perm_3src<opc, OpcodeStr, OpNode, VTInfo.info128>,
+ avx512_perm_3src_mb<opc, OpcodeStr, OpNode, VTInfo.info128>,
+ EVEX_V128;
+ defm NAME#256: avx512_perm_3src<opc, OpcodeStr, OpNode, VTInfo.info256>,
+ avx512_perm_3src_mb<opc, OpcodeStr, OpNode, VTInfo.info256>,
+ EVEX_V256;
+ }
+}
+defm VPERMI2D : avx512_perm_3src_sizes<0x76, "vpermi2d", X86VPermiv3,
+ avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
+defm VPERMI2Q : avx512_perm_3src_sizes<0x76, "vpermi2q", X86VPermiv3,
+ avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPERMI2PS : avx512_perm_3src_sizes<0x77, "vpermi2ps", X86VPermiv3,
+ avx512vl_f32_info>, EVEX_CD8<32, CD8VF>;
+defm VPERMI2PD : avx512_perm_3src_sizes<0x77, "vpermi2pd", X86VPermiv3,
+ avx512vl_f64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
+
+defm VPERMT2D : avx512_perm_3src_sizes<0x7E, "vpermt2d", X86VPermv3,
+ avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
+defm VPERMT2Q : avx512_perm_3src_sizes<0x7E, "vpermt2q", X86VPermv3,
+ avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
+defm VPERMT2PS : avx512_perm_3src_sizes<0x7F, "vpermt2ps", X86VPermv3,
+ avx512vl_f32_info>, EVEX_CD8<32, CD8VF>;
+defm VPERMT2PD : avx512_perm_3src_sizes<0x7F, "vpermt2pd", X86VPermv3,
+ avx512vl_f64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
+
+defm VPERMT2W : avx512_perm_3src_sizes_w<0x7D, "vpermt2w", X86VPermv3,
+ avx512vl_i16_info>, VEX_W, EVEX_CD8<16, CD8VF>;
+defm VPERMI2W : avx512_perm_3src_sizes_w<0x75, "vpermi2w", X86VPermiv3,
+ avx512vl_i16_info>, VEX_W, EVEX_CD8<16, CD8VF>;
//===----------------------------------------------------------------------===//
// AVX-512 - BLEND using mask
@@ -2044,11 +2013,11 @@ defm : avx512_binop_pat<xor, KXORWrr>;
def : Pat<(xor (xor VK16:$src1, VK16:$src2), (v16i1 immAllOnesV)),
(KXNORWrr VK16:$src1, VK16:$src2)>;
def : Pat<(xor (xor VK8:$src1, VK8:$src2), (v8i1 immAllOnesV)),
- (KXNORBrr VK8:$src1, VK8:$src2)>;
+ (KXNORBrr VK8:$src1, VK8:$src2)>, Requires<[HasDQI]>;
def : Pat<(xor (xor VK32:$src1, VK32:$src2), (v32i1 immAllOnesV)),
- (KXNORDrr VK32:$src1, VK32:$src2)>;
+ (KXNORDrr VK32:$src1, VK32:$src2)>, Requires<[HasBWI]>;
def : Pat<(xor (xor VK64:$src1, VK64:$src2), (v64i1 immAllOnesV)),
- (KXNORQrr VK64:$src1, VK64:$src2)>;
+ (KXNORQrr VK64:$src1, VK64:$src2)>, Requires<[HasBWI]>;
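+// KXNORB requires AVX512DQ and KXNORD/KXNORQ require AVX512BW; without DQI
+// the v8i1 case falls back to the wider pattern guarded below.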
let Predicates = [NoDQI] in
def : Pat<(xor (xor VK8:$src1, VK8:$src2), (v8i1 immAllOnesV)),
@@ -3157,7 +3126,8 @@ defm VPMULLW : avx512_binop_rm_vl_w<0xD5, "vpmull", mul,
SSE_INTALU_ITINS_P, HasBWI, 1>;
defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmull", mul,
SSE_INTALU_ITINS_P, HasDQI, 1>, T8PD;
-
+defm VPAVG : avx512_binop_rm_vl_bw<0xE0, 0xE3, "vpavg", X86avg,
+ SSE_INTALU_ITINS_P, HasBWI, 1>;
multiclass avx512_binop_all<bits<8> opc, string OpcodeStr, OpndItins itins,
SDNode OpNode, bit IsCommutable = 0> {
@@ -3278,30 +3248,6 @@ defm VPMINUW : avx512_binop_rm_vl_w<0x3A, "vpminu", X86umin,
defm VPMINU : avx512_binop_rm_vl_dq<0x3B, 0x3B, "vpminu", X86umin,
SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
-def : Pat <(v16i32 (int_x86_avx512_mask_pmaxs_d_512 (v16i32 VR512:$src1),
- (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
- (VPMAXSDZrr VR512:$src1, VR512:$src2)>;
-def : Pat <(v16i32 (int_x86_avx512_mask_pmaxu_d_512 (v16i32 VR512:$src1),
- (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
- (VPMAXUDZrr VR512:$src1, VR512:$src2)>;
-def : Pat <(v8i64 (int_x86_avx512_mask_pmaxs_q_512 (v8i64 VR512:$src1),
- (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
- (VPMAXSQZrr VR512:$src1, VR512:$src2)>;
-def : Pat <(v8i64 (int_x86_avx512_mask_pmaxu_q_512 (v8i64 VR512:$src1),
- (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
- (VPMAXUQZrr VR512:$src1, VR512:$src2)>;
-def : Pat <(v16i32 (int_x86_avx512_mask_pmins_d_512 (v16i32 VR512:$src1),
- (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
- (VPMINSDZrr VR512:$src1, VR512:$src2)>;
-def : Pat <(v16i32 (int_x86_avx512_mask_pminu_d_512 (v16i32 VR512:$src1),
- (v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
- (VPMINUDZrr VR512:$src1, VR512:$src2)>;
-def : Pat <(v8i64 (int_x86_avx512_mask_pmins_q_512 (v8i64 VR512:$src1),
- (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
- (VPMINSQZrr VR512:$src1, VR512:$src2)>;
-def : Pat <(v8i64 (int_x86_avx512_mask_pminu_q_512 (v8i64 VR512:$src1),
- (v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
- (VPMINUQZrr VR512:$src1, VR512:$src2)>;
//===----------------------------------------------------------------------===//
// AVX-512 - Unpack Instructions
//===----------------------------------------------------------------------===//
@@ -4191,29 +4137,72 @@ defm VFNMSUBSDZ : avx512_fma3s_rm<0xAF, "vfnmsub213sd", X86Fnmsub, FR64X,
// AVX-512 Scalar convert from sign integer to float/double
//===----------------------------------------------------------------------===//
-multiclass avx512_vcvtsi<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
- X86MemOperand x86memop, string asm> {
-let hasSideEffects = 0 in {
- def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
+multiclass avx512_vcvtsi<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
+ X86VectorVTInfo DstVT, X86MemOperand x86memop,
+ PatFrag ld_frag, string asm> {
+ let hasSideEffects = 0 in {
+ def rr : SI<opc, MRMSrcReg, (outs DstVT.FRC:$dst),
+ (ins DstVT.FRC:$src1, SrcRC:$src),
!strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
EVEX_4V;
- let mayLoad = 1 in
- def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
- (ins DstRC:$src1, x86memop:$src),
+ let mayLoad = 1 in
+ def rm : SI<opc, MRMSrcMem, (outs DstVT.FRC:$dst),
+ (ins DstVT.FRC:$src1, x86memop:$src),
!strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
EVEX_4V;
-} // hasSideEffects = 0
+ } // hasSideEffects = 0
+ let isCodeGenOnly = 1 in {
+ def rr_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst),
+ (ins DstVT.RC:$src1, SrcRC:$src2),
+ !strconcat(asm,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set DstVT.RC:$dst,
+ (OpNode (DstVT.VT DstVT.RC:$src1),
+ SrcRC:$src2,
+ (i32 FROUND_CURRENT)))]>, EVEX_4V;
+
+ def rm_Int : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst),
+ (ins DstVT.RC:$src1, x86memop:$src2),
+ !strconcat(asm,"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ [(set DstVT.RC:$dst,
+ (OpNode (DstVT.VT DstVT.RC:$src1),
+ (ld_frag addr:$src2),
+ (i32 FROUND_CURRENT)))]>, EVEX_4V;
+ } // isCodeGenOnly = 1
+}
+
+multiclass avx512_vcvtsi_round<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
+ X86VectorVTInfo DstVT, string asm> {
+ def rrb_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst),
+ (ins DstVT.RC:$src1, SrcRC:$src2, AVX512RC:$rc),
+ !strconcat(asm,
+ "\t{$src2, $rc, $src1, $dst|$dst, $src1, $rc, $src2}"),
+ [(set DstVT.RC:$dst,
+ (OpNode (DstVT.VT DstVT.RC:$src1),
+ SrcRC:$src2,
+ (i32 imm:$rc)))]>, EVEX_4V, EVEX_B, EVEX_RC;
+}
+
+multiclass avx512_vcvtsi_common<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
+ X86VectorVTInfo DstVT, X86MemOperand x86memop,
+ PatFrag ld_frag, string asm> {
+ defm NAME : avx512_vcvtsi_round<opc, OpNode, SrcRC, DstVT, asm>,
+ avx512_vcvtsi<opc, OpNode, SrcRC, DstVT, x86memop, ld_frag, asm>,
+ VEX_LIG;
}
let Predicates = [HasAVX512] in {
-defm VCVTSI2SSZ : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}">,
- XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
-defm VCVTSI642SSZ : avx512_vcvtsi<0x2A, GR64, FR32X, i64mem, "cvtsi2ss{q}">,
- XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
-defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, GR32, FR64X, i32mem, "cvtsi2sd{l}">,
- XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
-defm VCVTSI642SDZ : avx512_vcvtsi<0x2A, GR64, FR64X, i64mem, "cvtsi2sd{q}">,
- XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
+defm VCVTSI2SSZ : avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR32,
+ v4f32x_info, i32mem, loadi32, "cvtsi2ss{l}">,
+ XS, EVEX_CD8<32, CD8VT1>;
+defm VCVTSI642SSZ: avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR64,
+ v4f32x_info, i64mem, loadi64, "cvtsi2ss{q}">,
+ XS, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VCVTSI2SDZ : avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR32,
+ v2f64x_info, i32mem, loadi32, "cvtsi2sd{l}">,
+ XD, EVEX_CD8<32, CD8VT1>;
+defm VCVTSI642SDZ: avx512_vcvtsi_common<0x2A, X86SintToFpRnd, GR64,
+ v2f64x_info, i64mem, loadi64, "cvtsi2sd{q}">,
+ XD, VEX_W, EVEX_CD8<64, CD8VT1>;
def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
(VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
@@ -4233,14 +4222,18 @@ def : Pat<(f64 (sint_to_fp GR32:$src)),
def : Pat<(f64 (sint_to_fp GR64:$src)),
(VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
-defm VCVTUSI2SSZ : avx512_vcvtsi<0x7B, GR32, FR32X, i32mem, "cvtusi2ss{l}">,
- XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
-defm VCVTUSI642SSZ : avx512_vcvtsi<0x7B, GR64, FR32X, i64mem, "cvtusi2ss{q}">,
- XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
-defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, GR32, FR64X, i32mem, "cvtusi2sd{l}">,
+defm VCVTUSI2SSZ : avx512_vcvtsi_common<0x7B, X86SuintToFpRnd, GR32,
+ v4f32x_info, i32mem, loadi32,
+ "cvtusi2ss{l}">, XS, EVEX_CD8<32, CD8VT1>;
+defm VCVTUSI642SSZ : avx512_vcvtsi_common<0x7B, X86SuintToFpRnd, GR64,
+ v4f32x_info, i64mem, loadi64, "cvtusi2ss{q}">,
+ XS, VEX_W, EVEX_CD8<64, CD8VT1>;
+defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, X86SuintToFpRnd, GR32, v2f64x_info,
+ i32mem, loadi32, "cvtusi2sd{l}">,
XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
-defm VCVTUSI642SDZ : avx512_vcvtsi<0x7B, GR64, FR64X, i64mem, "cvtusi2sd{q}">,
- XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
+defm VCVTUSI642SDZ : avx512_vcvtsi_common<0x7B, X86SuintToFpRnd, GR64,
+ v2f64x_info, i64mem, loadi64, "cvtusi2sd{q}">,
+ XD, VEX_W, EVEX_CD8<64, CD8VT1>;
def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
(VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
@@ -4321,18 +4314,9 @@ let isCodeGenOnly = 1 in {
int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
- defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
- int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}",
- SSE_CVT_Scalar, 0>, XS, EVEX_4V;
- defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
- int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}",
- SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V;
- defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
- int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}",
- SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
} // isCodeGenOnly = 1
// Convert float/double to signed/unsigned int 32/64 with truncation
diff --git a/lib/Target/X86/X86InstrBuilder.h b/lib/Target/X86/X86InstrBuilder.h
index 2056056d23a5..eb4dc48a7a65 100644
--- a/lib/Target/X86/X86InstrBuilder.h
+++ b/lib/Target/X86/X86InstrBuilder.h
@@ -179,6 +179,6 @@ addConstantPoolReference(const MachineInstrBuilder &MIB, unsigned CPI,
.addConstantPoolIndex(CPI, 0, OpFlags).addReg(0);
}
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td
index dfe58ef8067b..16ae77dd81a3 100644
--- a/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -72,6 +72,9 @@ def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
def X86cmps : SDNode<"X86ISD::FSETCC", SDTX86Cmps>;
//def X86cmpsd : SDNode<"X86ISD::FSETCCsd", SDTX86Cmpsd>;
+def X86cvtdq2pd: SDNode<"X86ISD::CVTDQ2PD",
+ SDTypeProfile<1, 1, [SDTCisVT<0, v2f64>,
+ SDTCisVT<1, v4i32>]>>;
def X86pshufb : SDNode<"X86ISD::PSHUFB",
SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>]>>;
@@ -184,6 +187,7 @@ def X86addus : SDNode<"X86ISD::ADDUS", SDTIntBinOp>;
def X86subus : SDNode<"X86ISD::SUBUS", SDTIntBinOp>;
def X86adds : SDNode<"X86ISD::ADDS", SDTIntBinOp>;
def X86subs : SDNode<"X86ISD::SUBS", SDTIntBinOp>;
+def X86avg : SDNode<"X86ISD::AVG" , SDTIntBinOp>;
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>;
@@ -350,6 +354,12 @@ def X86expand : SDNode<"X86ISD::EXPAND", SDTypeProfile<1, 3,
[SDTCisSameAs<0, 3>,
SDTCisVec<3>, SDTCisVec<1>, SDTCisInt<1>]>, []>;
+def SDTintToFPRound: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>,
+ SDTCisSameAs<0,1>, SDTCisInt<2>, SDTCisInt<3>]>;
+
+def X86SintToFpRnd : SDNode<"X86ISD::SINT_TO_FP_RND", SDTintToFPRound>;
+def X86SuintToFpRnd : SDNode<"X86ISD::UINT_TO_FP_RND", SDTintToFPRound>;
+
//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 6b7a9299dcfb..4aa0ae6f1959 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -3456,11 +3456,11 @@ bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
return !isPredicated(MI);
}
-bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
+bool X86InstrInfo::AnalyzeBranchImpl(
+ MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {
+
// Start from the bottom of the block and work up, examining the
// terminator instructions.
MachineBasicBlock::iterator I = MBB.end();
@@ -3558,6 +3558,7 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
FBB = TBB;
TBB = I->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(BranchCode));
+ CondBranches.push_back(I);
continue;
}
@@ -3595,11 +3596,90 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
// Update the MachineOperand.
Cond[0].setImm(BranchCode);
+ CondBranches.push_back(I);
}
return false;
}
+bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ SmallVector<MachineInstr *, 4> CondBranches;
+ return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
+}
+
+bool X86InstrInfo::AnalyzeBranchPredicate(MachineBasicBlock &MBB,
+ MachineBranchPredicate &MBP,
+ bool AllowModify) const {
+ using namespace std::placeholders;
+
+ SmallVector<MachineOperand, 4> Cond;
+ SmallVector<MachineInstr *, 4> CondBranches;
+ if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches,
+ AllowModify))
+ return true;
+
+ if (Cond.size() != 1)
+ return true;
+
+ assert(MBP.TrueDest && "expected!");
+
+ if (!MBP.FalseDest)
+ MBP.FalseDest = MBB.getNextNode();
+
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+
+ MachineInstr *ConditionDef = nullptr;
+ bool SingleUseCondition = true;
+
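+ // Scan backwards from the terminator for the instruction that defines
+ // EFLAGS; any intervening reader means the condition has other uses.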
+ for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) {
+ if (I->modifiesRegister(X86::EFLAGS, TRI)) {
+ ConditionDef = &*I;
+ break;
+ }
+
+ if (I->readsRegister(X86::EFLAGS, TRI))
+ SingleUseCondition = false;
+ }
+
+ if (!ConditionDef)
+ return true;
+
+ if (SingleUseCondition) {
+ for (auto *Succ : MBB.successors())
+ if (Succ->isLiveIn(X86::EFLAGS))
+ SingleUseCondition = false;
+ }
+
+ MBP.ConditionDef = ConditionDef;
+ MBP.SingleUseCondition = SingleUseCondition;
+
+ // Currently we only recognize the simple pattern:
+ //
+ // test %reg, %reg
+ // je %label
+ //
+ const unsigned TestOpcode =
+ Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
+
+ if (ConditionDef->getOpcode() == TestOpcode &&
+ ConditionDef->getNumOperands() == 3 &&
+ ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) &&
+ (Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) {
+ MBP.LHS = ConditionDef->getOperand(0);
+ MBP.RHS = MachineOperand::CreateImm(0);
+ MBP.Predicate = Cond[0].getImm() == X86::COND_NE
+ ? MachineBranchPredicate::PRED_NE
+ : MachineBranchPredicate::PRED_EQ;
+ return false;
+ }
+
+ return true;
+}
+
unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator I = MBB.end();
unsigned Count = 0;
@@ -3622,8 +3702,7 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
@@ -3671,7 +3750,7 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
bool X86InstrInfo::
canInsertSelect(const MachineBasicBlock &MBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg,
int &CondCycles, int &TrueCycles, int &FalseCycles) const {
// Not all subtargets have cmov instructions.
@@ -3708,8 +3787,7 @@ canInsertSelect(const MachineBasicBlock &MBB,
void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
- unsigned DstReg,
- const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned DstReg, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
assert(Cond.size() == 1 && "Invalid Cond array");
@@ -3967,6 +4045,36 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
}
}
+bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
+ const MCInstrDesc &Desc = MemOp->getDesc();
+ int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags, MemOp->getOpcode());
+ if (MemRefBegin < 0)
+ return false;
+
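+ // Skip any extra leading operands (e.g. tied defs) so MemRefBegin points
+ // at the first of the five x86 memory operands.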
+ MemRefBegin += X86II::getOperandBias(Desc);
+
+ BaseReg = MemOp->getOperand(MemRefBegin + X86::AddrBaseReg).getReg();
+ if (MemOp->getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
+ return false;
+
+ if (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
+ X86::NoRegister)
+ return false;
+
+ const MachineOperand &DispMO = MemOp->getOperand(MemRefBegin + X86::AddrDisp);
+
+ // Displacement can be symbolic
+ if (!DispMO.isImm())
+ return false;
+
+ Offset = DispMO.getImm();
+
+ return (MemOp->getOperand(MemRefBegin + X86::AddrIndexReg).getReg() ==
+ X86::NoRegister);
+}
+
static unsigned getStoreRegOpcode(unsigned SrcReg,
const TargetRegisterClass *RC,
bool isStackAligned,
@@ -6219,13 +6327,217 @@ bool X86InstrInfo::isHighLatencyDef(int opc) const {
}
bool X86InstrInfo::
-hasHighOperandLatency(const InstrItineraryData *ItinData,
+hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx) const {
return isHighLatencyDef(DefMI->getOpcode());
}
+/// If the input instruction is part of a chain of dependent ops that are
+/// suitable for reassociation, return the earlier instruction in the sequence
+/// that defines its first operand, otherwise return a nullptr.
+/// If the instruction's operands must be commuted to be considered a
+/// reassociation candidate, Commuted will be set to true.
+static MachineInstr *isReassocCandidate(const MachineInstr &Inst,
+ unsigned AssocOpcode,
+ bool checkPrevOneUse,
+ bool &Commuted) {
+ if (Inst.getOpcode() != AssocOpcode)
+ return nullptr;
+
+ MachineOperand Op1 = Inst.getOperand(1);
+ MachineOperand Op2 = Inst.getOperand(2);
+
+ const MachineBasicBlock *MBB = Inst.getParent();
+ const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+
+ // We need virtual register definitions.
+ MachineInstr *MI1 = nullptr;
+ MachineInstr *MI2 = nullptr;
+ if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
+ MI1 = MRI.getUniqueVRegDef(Op1.getReg());
+ if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
+ MI2 = MRI.getUniqueVRegDef(Op2.getReg());
+
+ // And they need to be in the trace (otherwise, they won't have a depth).
+ if (!MI1 || !MI2 || MI1->getParent() != MBB || MI2->getParent() != MBB)
+ return nullptr;
+
+ Commuted = false;
+ if (MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode) {
+ std::swap(MI1, MI2);
+ Commuted = true;
+ }
+
+ // Avoid reassociating operands when it won't provide any benefit. If both
+ // operands are produced by instructions of this type, we may already
+ // have the optimal sequence.
+ if (MI2->getOpcode() == AssocOpcode)
+ return nullptr;
+
+ // The instruction must only be used by the other instruction that we
+ // reassociate with.
+ if (checkPrevOneUse && !MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg()))
+ return nullptr;
+
+ // We must match a simple chain of dependent ops.
+ // TODO: This check is not necessary for the earliest instruction in the
+ // sequence. Instead of a sequence of 3 dependent instructions with the same
+ // opcode, we only need to find a sequence of 2 dependent instructions with
+ // the same opcode plus 1 other instruction that adds to the height of the
+ // trace.
+ if (MI1->getOpcode() != AssocOpcode)
+ return nullptr;
+
+ return MI1;
+}
+
+/// Select a pattern based on how the operands of each associative operation
+/// need to be commuted.
+static MachineCombinerPattern::MC_PATTERN getPattern(bool CommutePrev,
+ bool CommuteRoot) {
+ if (CommutePrev) {
+ if (CommuteRoot)
+ return MachineCombinerPattern::MC_REASSOC_XA_YB;
+ return MachineCombinerPattern::MC_REASSOC_XA_BY;
+ } else {
+ if (CommuteRoot)
+ return MachineCombinerPattern::MC_REASSOC_AX_YB;
+ return MachineCombinerPattern::MC_REASSOC_AX_BY;
+ }
+}
+
+bool X86InstrInfo::getMachineCombinerPatterns(MachineInstr &Root,
+ SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
+ if (!Root.getParent()->getParent()->getTarget().Options.UnsafeFPMath)
+ return false;
+
+ // TODO: There are many more associative instruction types to match:
+ // 1. Other forms of scalar FP add (non-AVX)
+ // 2. Other data types (double, integer, vectors)
+ // 3. Other math / logic operations (mul, and, or)
+ unsigned AssocOpcode = X86::VADDSSrr;
+
+ // TODO: There is nothing x86-specific here except the instruction type.
+ // This logic could be hoisted into the machine combiner pass itself.
+ bool CommuteRoot;
+ if (MachineInstr *Prev = isReassocCandidate(Root, AssocOpcode, true,
+ CommuteRoot)) {
+ bool CommutePrev;
+ if (isReassocCandidate(*Prev, AssocOpcode, false, CommutePrev)) {
+ // We found a sequence of instructions that may be suitable for a
+ // reassociation of operands to increase ILP.
+ Patterns.push_back(getPattern(CommutePrev, CommuteRoot));
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// Attempt the following reassociation to reduce critical path length:
+/// B = A op X (Prev)
+/// C = B op Y (Root)
+/// ===>
+/// B = X op Y
+/// C = A op B
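+/// E.g. for FP adds, ((A + X) + Y) becomes A + (X + Y), shortening the
+/// critical path when A is the late-arriving operand.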
+static void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
+ MachineCombinerPattern::MC_PATTERN Pattern,
+ SmallVectorImpl<MachineInstr *> &InsInstrs,
+ SmallVectorImpl<MachineInstr *> &DelInstrs,
+ DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {
+ MachineFunction *MF = Root.getParent()->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
+ const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
+ const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
+
+ // This array encodes the operand index for each parameter because the
+ // operands may be commuted. Each row corresponds to a pattern value,
+ // and each column specifies the index of A, B, X, Y.
+ unsigned OpIdx[4][4] = {
+ { 1, 1, 2, 2 },
+ { 1, 2, 2, 1 },
+ { 2, 1, 1, 2 },
+ { 2, 2, 1, 1 }
+ };
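+ // Rows correspond, in order, to MC_REASSOC_AX_BY, AX_YB, XA_BY and XA_YB,
+ // matching the enum values used to index the table.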
+
+ MachineOperand &OpA = Prev.getOperand(OpIdx[Pattern][0]);
+ MachineOperand &OpB = Root.getOperand(OpIdx[Pattern][1]);
+ MachineOperand &OpX = Prev.getOperand(OpIdx[Pattern][2]);
+ MachineOperand &OpY = Root.getOperand(OpIdx[Pattern][3]);
+ MachineOperand &OpC = Root.getOperand(0);
+
+ unsigned RegA = OpA.getReg();
+ unsigned RegB = OpB.getReg();
+ unsigned RegX = OpX.getReg();
+ unsigned RegY = OpY.getReg();
+ unsigned RegC = OpC.getReg();
+
+ if (TargetRegisterInfo::isVirtualRegister(RegA))
+ MRI.constrainRegClass(RegA, RC);
+ if (TargetRegisterInfo::isVirtualRegister(RegB))
+ MRI.constrainRegClass(RegB, RC);
+ if (TargetRegisterInfo::isVirtualRegister(RegX))
+ MRI.constrainRegClass(RegX, RC);
+ if (TargetRegisterInfo::isVirtualRegister(RegY))
+ MRI.constrainRegClass(RegY, RC);
+ if (TargetRegisterInfo::isVirtualRegister(RegC))
+ MRI.constrainRegClass(RegC, RC);
+
+ // Create a new virtual register for the result of (X op Y) instead of
+ // recycling RegB because the MachineCombiner's computation of the critical
+ // path requires a new register definition rather than an existing one.
+ unsigned NewVR = MRI.createVirtualRegister(RC);
+ InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
+
+ unsigned Opcode = Root.getOpcode();
+ bool KillA = OpA.isKill();
+ bool KillX = OpX.isKill();
+ bool KillY = OpY.isKill();
+
+ // Create new instructions for insertion.
+ MachineInstrBuilder MIB1 =
+ BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
+ .addReg(RegX, getKillRegState(KillX))
+ .addReg(RegY, getKillRegState(KillY));
+ InsInstrs.push_back(MIB1);
+
+ MachineInstrBuilder MIB2 =
+ BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
+ .addReg(RegA, getKillRegState(KillA))
+ .addReg(NewVR, getKillRegState(true));
+ InsInstrs.push_back(MIB2);
+
+ // Record old instructions for deletion.
+ DelInstrs.push_back(&Prev);
+ DelInstrs.push_back(&Root);
+}
+
+void X86InstrInfo::genAlternativeCodeSequence(
+ MachineInstr &Root,
+ MachineCombinerPattern::MC_PATTERN Pattern,
+ SmallVectorImpl<MachineInstr *> &InsInstrs,
+ SmallVectorImpl<MachineInstr *> &DelInstrs,
+ DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
+ MachineRegisterInfo &MRI = Root.getParent()->getParent()->getRegInfo();
+
+ // Select the previous instruction in the sequence based on the input pattern.
+ MachineInstr *Prev = nullptr;
+ if (Pattern == MachineCombinerPattern::MC_REASSOC_AX_BY ||
+ Pattern == MachineCombinerPattern::MC_REASSOC_XA_BY)
+ Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
+ else if (Pattern == MachineCombinerPattern::MC_REASSOC_AX_YB ||
+ Pattern == MachineCombinerPattern::MC_REASSOC_XA_YB)
+ Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
+ else
+ llvm_unreachable("Unknown pattern for machine combiner");
+
+ reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
+ return;
+}
+
namespace {
/// Create Global Base Reg pass. This initializes the PIC
/// global base register for x86-32.
@@ -6292,7 +6604,7 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
char CGBR::ID = 0;
FunctionPass*
@@ -6404,7 +6716,7 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
};
-}
+} // namespace
char LDTLSCleanup::ID = 0;
FunctionPass*
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index ac1b2d4fedc6..4912951140d9 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -26,6 +26,19 @@ namespace llvm {
class X86RegisterInfo;
class X86Subtarget;
+ namespace MachineCombinerPattern {
+ enum MC_PATTERN : int {
+ // These are commutative variants for reassociating a computation chain
+ // of the form:
+ // B = A op X (Prev)
+ // C = B op Y (Root)
+ MC_REASSOC_AX_BY = 0,
+ MC_REASSOC_AX_YB = 1,
+ MC_REASSOC_XA_BY = 2,
+ MC_REASSOC_XA_YB = 3,
+ };
+ } // end namespace MachineCombinerPattern
+
namespace X86 {
// X86 specific condition code. These correspond to X86_*_COND in
// X86InstrInfo.td. They must be kept in synch.
@@ -77,7 +90,7 @@ namespace X86 {
/// GetOppositeBranchCondition - Return the inverse of the specified cond,
/// e.g. turning COND_E to COND_NE.
CondCode GetOppositeBranchCondition(CondCode CC);
-} // end namespace X86;
+} // namespace X86
/// isGlobalStubReference - Return true if the specified TargetFlag operand is
@@ -166,6 +179,12 @@ class X86InstrInfo final : public X86GenInstrInfo {
virtual void anchor();
+ bool AnalyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ SmallVectorImpl<MachineInstr *> &CondBranches,
+ bool AllowModify) const;
+
public:
explicit X86InstrInfo(X86Subtarget &STI);
@@ -254,18 +273,23 @@ public:
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
+
+ bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const override;
+ bool AnalyzeBranchPredicate(MachineBasicBlock &MBB,
+ TargetInstrInfo::MachineBranchPredicate &MBP,
+ bool AllowModify = false) const override;
+
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
- bool canInsertSelect(const MachineBasicBlock&,
- const SmallVectorImpl<MachineOperand> &Cond,
+ bool canInsertSelect(const MachineBasicBlock&, ArrayRef<MachineOperand> Cond,
unsigned, unsigned, int&, int&, int&) const override;
void insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
- unsigned DstReg,
- const SmallVectorImpl<MachineOperand> &Cond,
+ unsigned DstReg, ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg) const override;
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
@@ -423,12 +447,32 @@ public:
bool isHighLatencyDef(int opc) const override;
- bool hasHighOperandLatency(const InstrItineraryData *ItinData,
+ bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI,
unsigned UseIdx) const override;
+
+ bool useMachineCombiner() const override {
+ return true;
+ }
+
+ /// Return true when there is potentially a faster code sequence
+ /// for an instruction chain ending in <Root>. All potential patterns are
+ /// output in the <Pattern> array.
+ bool getMachineCombinerPatterns(
+ MachineInstr &Root,
+ SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &P) const override;
+
+ /// When getMachineCombinerPatterns() finds a pattern, this function generates
+ /// the instructions that could replace the original code sequence.
+ void genAlternativeCodeSequence(
+ MachineInstr &Root, MachineCombinerPattern::MC_PATTERN P,
+ SmallVectorImpl<MachineInstr *> &InsInstrs,
+ SmallVectorImpl<MachineInstr *> &DelInstrs,
+ DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
+
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
@@ -468,6 +512,6 @@ private:
int &FrameIndex) const;
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 8294e38e9957..95629184f2cf 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -2234,14 +2234,27 @@ def CVTDQ2PDrr : S2SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
[(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))],
IIC_SSE_CVT_PD_RM>, Sched<[WriteCvtI2F]>;
-// AVX 256-bit register conversion intrinsics
+// AVX register conversion intrinsics
let Predicates = [HasAVX] in {
+ def : Pat<(v2f64 (X86cvtdq2pd (v4i32 VR128:$src))),
+ (VCVTDQ2PDrr VR128:$src)>;
+ def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
+ (VCVTDQ2PDrm addr:$src)>;
+
def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
(VCVTDQ2PDYrr VR128:$src)>;
def : Pat<(v4f64 (sint_to_fp (bc_v4i32 (loadv2i64 addr:$src)))),
(VCVTDQ2PDYrm addr:$src)>;
} // Predicates = [HasAVX]
+// SSE2 register conversion intrinsics
+let Predicates = [HasSSE2] in {
+ def : Pat<(v2f64 (X86cvtdq2pd (v4i32 VR128:$src))),
+ (CVTDQ2PDrr VR128:$src)>;
+ def : Pat<(v2f64 (X86cvtdq2pd (bc_v4i32 (loadv2i64 addr:$src)))),
+ (CVTDQ2PDrm addr:$src)>;
+} // Predicates = [HasSSE2]
+
// Convert packed double to packed single
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
diff --git a/lib/Target/X86/X86IntrinsicsInfo.h b/lib/Target/X86/X86IntrinsicsInfo.h
index 0268066c2ba1..2b829301e327 100644
--- a/lib/Target/X86/X86IntrinsicsInfo.h
+++ b/lib/Target/X86/X86IntrinsicsInfo.h
@@ -242,6 +242,13 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx2_psubus_b, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(avx2_psubus_w, INTR_TYPE_2OP, X86ISD::SUBUS, 0),
X86_INTRINSIC_DATA(avx2_vperm2i128, INTR_TYPE_3OP, X86ISD::VPERM2X128, 0),
+ X86_INTRINSIC_DATA(avx512_cvtsi2sd32, INTR_TYPE_3OP, X86ISD::SINT_TO_FP_RND, 0),
+ X86_INTRINSIC_DATA(avx512_cvtsi2sd64, INTR_TYPE_3OP, X86ISD::SINT_TO_FP_RND, 0),
+ X86_INTRINSIC_DATA(avx512_cvtsi2ss32, INTR_TYPE_3OP, X86ISD::SINT_TO_FP_RND, 0),
+ X86_INTRINSIC_DATA(avx512_cvtsi2ss64, INTR_TYPE_3OP, X86ISD::SINT_TO_FP_RND, 0),
+ X86_INTRINSIC_DATA(avx512_cvtusi2ss, INTR_TYPE_3OP, X86ISD::UINT_TO_FP_RND, 0),
+ X86_INTRINSIC_DATA(avx512_cvtusi642sd, INTR_TYPE_3OP, X86ISD::UINT_TO_FP_RND, 0),
+ X86_INTRINSIC_DATA(avx512_cvtusi642ss, INTR_TYPE_3OP, X86ISD::UINT_TO_FP_RND, 0),
X86_INTRINSIC_DATA(avx512_exp2_pd, INTR_TYPE_1OP_MASK_RM, X86ISD::EXP2, 0),
X86_INTRINSIC_DATA(avx512_exp2_ps, INTR_TYPE_1OP_MASK_RM, X86ISD::EXP2, 0),
X86_INTRINSIC_DATA(avx512_mask_add_pd_128, INTR_TYPE_2OP_MASK, ISD::FADD, 0),
@@ -469,6 +476,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_pandn_q_128, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
X86_INTRINSIC_DATA(avx512_mask_pandn_q_256, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
X86_INTRINSIC_DATA(avx512_mask_pandn_q_512, INTR_TYPE_2OP_MASK, X86ISD::ANDNP, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pavg_b_128, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pavg_b_256, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pavg_b_512, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pavg_w_128, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pavg_w_256, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pavg_w_512, INTR_TYPE_2OP_MASK, X86ISD::AVG, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_128, CMP_MASK, X86ISD::PCMPEQM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_256, CMP_MASK, X86ISD::PCMPEQM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpeq_b_512, CMP_MASK, X86ISD::PCMPEQM, 0),
@@ -493,6 +506,54 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_128, CMP_MASK, X86ISD::PCMPGTM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_256, CMP_MASK, X86ISD::PCMPGTM, 0),
X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_512, CMP_MASK, X86ISD::PCMPGTM, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_b_128, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_b_256, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_b_512, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_d_128, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_d_256, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_d_512, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_q_128, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_q_256, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_q_512, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_w_128, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_w_256, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxs_w_512, INTR_TYPE_2OP_MASK, X86ISD::SMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_b_128, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_b_256, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_b_512, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_d_128, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_d_256, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_d_512, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_q_128, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_q_256, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_q_512, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_w_128, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_w_256, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmaxu_w_512, INTR_TYPE_2OP_MASK, X86ISD::UMAX, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_b_128, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_b_256, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_b_512, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_d_128, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_d_256, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_d_512, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_q_128, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_q_256, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_q_512, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_w_128, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_w_256, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pmins_w_512, INTR_TYPE_2OP_MASK, X86ISD::SMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_b_128, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_b_256, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_b_512, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_d_128, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_d_256, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_d_512, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_q_128, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_q_256, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_q_512, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_w_128, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_w_256, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
+ X86_INTRINSIC_DATA(avx512_mask_pminu_w_512, INTR_TYPE_2OP_MASK, X86ISD::UMIN, 0),
X86_INTRINSIC_DATA(avx512_mask_pmul_dq_128, INTR_TYPE_2OP_MASK,
X86ISD::PMULDQ, 0),
X86_INTRINSIC_DATA(avx512_mask_pmul_dq_256, INTR_TYPE_2OP_MASK,
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index ff1436af4ece..64135e0f53e5 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -17,6 +17,7 @@
#include "InstPrinter/X86ATTInstPrinter.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "Utils/X86ShuffleDecode.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -50,6 +51,8 @@ class X86MCInstLower {
public:
X86MCInstLower(const MachineFunction &MF, X86AsmPrinter &asmprinter);
+ Optional<MCOperand> LowerMachineOperand(const MachineInstr *MI,
+ const MachineOperand &MO) const;
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
@@ -109,7 +112,7 @@ namespace llvm {
OutStreamer->EmitInstruction(Inst, getSubtargetInfo());
SMShadowTracker.count(Inst, getSubtargetInfo());
}
-} // end llvm namespace
+} // namespace llvm
X86MCInstLower::X86MCInstLower(const MachineFunction &mf,
X86AsmPrinter &asmprinter)
@@ -402,47 +405,43 @@ static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
return Subtarget.is64Bit() ? X86::RETQ : X86::RETL;
}
+Optional<MCOperand>
+X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
+ const MachineOperand &MO) const {
+ switch (MO.getType()) {
+ default:
+ MI->dump();
+ llvm_unreachable("unknown operand type");
+ case MachineOperand::MO_Register:
+ // Ignore all implicit register operands.
+ if (MO.isImplicit())
+ return None;
+ return MCOperand::createReg(MO.getReg());
+ case MachineOperand::MO_Immediate:
+ return MCOperand::createImm(MO.getImm());
+ case MachineOperand::MO_MachineBasicBlock:
+ case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_ExternalSymbol:
+ return LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
+ case MachineOperand::MO_JumpTableIndex:
+ return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
+ case MachineOperand::MO_ConstantPoolIndex:
+ return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
+ case MachineOperand::MO_BlockAddress:
+ return LowerSymbolOperand(
+ MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
+ case MachineOperand::MO_RegisterMask:
+ // Ignore call clobbers.
+ return None;
+ }
+}
+
void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
OutMI.setOpcode(MI->getOpcode());
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
-
- MCOperand MCOp;
- switch (MO.getType()) {
- default:
- MI->dump();
- llvm_unreachable("unknown operand type");
- case MachineOperand::MO_Register:
- // Ignore all implicit register operands.
- if (MO.isImplicit()) continue;
- MCOp = MCOperand::createReg(MO.getReg());
- break;
- case MachineOperand::MO_Immediate:
- MCOp = MCOperand::createImm(MO.getImm());
- break;
- case MachineOperand::MO_MachineBasicBlock:
- case MachineOperand::MO_GlobalAddress:
- case MachineOperand::MO_ExternalSymbol:
- MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
- break;
- case MachineOperand::MO_JumpTableIndex:
- MCOp = LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
- break;
- case MachineOperand::MO_ConstantPoolIndex:
- MCOp = LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
- break;
- case MachineOperand::MO_BlockAddress:
- MCOp = LowerSymbolOperand(MO,
- AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
- break;
- case MachineOperand::MO_RegisterMask:
- // Ignore call clobbers.
- continue;
- }
-
- OutMI.addOperand(MCOp);
- }
+ for (const MachineOperand &MO : MI->operands())
+ if (auto MaybeMCOp = LowerMachineOperand(MI, MO))
+ OutMI.addOperand(MaybeMCOp.getValue());
// Handle a few special cases to eliminate operand modifiers.
ReSimplify:
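
The rewrite above replaces the old switch-and-continue loop with a helper that returns an empty Optional to mean "drop this operand" (implicit registers, call-clobber register masks). A minimal stand-alone sketch of that idiom, using std::optional and made-up types rather than the LLVM classes:

#include <optional>
#include <vector>

struct Operand { bool Implicit; int Reg; }; // hypothetical stand-in

// An empty optional means "this operand does not get lowered at all".
static std::optional<int> lowerOperand(const Operand &O) {
  if (O.Implicit)
    return std::nullopt; // e.g. implicit register operands
  return O.Reg;          // the lowered form
}

static void lowerAll(const std::vector<Operand> &In, std::vector<int> &Out) {
  for (const Operand &O : In)
    if (auto Lowered = lowerOperand(O)) // filter and transform in one step
      Out.push_back(*Lowered);
}
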
@@ -865,6 +864,28 @@ void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
SM.recordStatepoint(MI);
}
+void X86AsmPrinter::LowerFAULTING_LOAD_OP(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+ // FAULTING_LOAD_OP <def>, <handler label>, <load opcode>, <load operands>
+
+ unsigned LoadDefRegister = MI.getOperand(0).getReg();
+ MCSymbol *HandlerLabel = MI.getOperand(1).getMCSymbol();
+ unsigned LoadOpcode = MI.getOperand(2).getImm();
+ unsigned LoadOperandsBeginIdx = 3;
+
+ FM.recordFaultingOp(FaultMaps::FaultingLoad, HandlerLabel);
+
+ MCInst LoadMI;
+ LoadMI.setOpcode(LoadOpcode);
+ LoadMI.addOperand(MCOperand::createReg(LoadDefRegister));
+ for (auto I = MI.operands_begin() + LoadOperandsBeginIdx,
+ E = MI.operands_end();
+ I != E; ++I)
+ if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, *I))
+ LoadMI.addOperand(MaybeOperand.getValue());
+
+ OutStreamer->EmitInstruction(LoadMI, getSubtargetInfo());
+}
// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
@@ -1120,6 +1141,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case TargetOpcode::STATEPOINT:
return LowerSTATEPOINT(*MI, MCInstLowering);
+ case TargetOpcode::FAULTING_LOAD_OP:
+ return LowerFAULTING_LOAD_OP(*MI, MCInstLowering);
+
case TargetOpcode::STACKMAP:
return LowerSTACKMAP(*MI);
diff --git a/lib/Target/X86/X86MachineFunctionInfo.h b/lib/Target/X86/X86MachineFunctionInfo.h
index d598b55aae3e..342d26ab1fbb 100644
--- a/lib/Target/X86/X86MachineFunctionInfo.h
+++ b/lib/Target/X86/X86MachineFunctionInfo.h
@@ -179,6 +179,6 @@ public:
}
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86PadShortFunction.cpp b/lib/Target/X86/X86PadShortFunction.cpp
index 143e70bda9e7..33aa78ffdf8a 100644
--- a/lib/Target/X86/X86PadShortFunction.cpp
+++ b/lib/Target/X86/X86PadShortFunction.cpp
@@ -84,7 +84,7 @@ namespace {
};
char PadShortFunc::ID = 0;
-}
+} // namespace
FunctionPass *llvm::createX86PadShortFunctions() {
return new PadShortFunc();
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index e9b6bfc3273c..00e213423974 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -419,6 +419,22 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
return Reserved;
}
+void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
+ // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
+ // because the calling convention defines the EFLAGS register as NOT
+ // preserved.
+ //
+  // Unfortunately, EFLAGS shows up as live-out after branch folding. Add an
+  // assert to track this, and clear the register afterwards to avoid
+  // unnecessary crashes during release builds.
+ assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
+ "EFLAGS are not live-out from a patchpoint.");
+
+  // Also clear other registers that don't need preserving (IP).
+ for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
+ Mask[Reg / 32] &= ~(1U << (Reg % 32));
+}
+
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
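
The mask arithmetic above packs one bit per register into 32-bit words: Reg / 32 selects the word and Reg % 32 the bit within it. A self-contained sketch of the same computation with a made-up register number:

#include <cassert>
#include <cstdint>

// Clear one register's bit in a packed uint32_t live-out mask.
static void clearRegBit(uint32_t *Mask, unsigned Reg) {
  Mask[Reg / 32] &= ~(1U << (Reg % 32));
}

int main() {
  uint32_t Mask[4] = {0, 0, 0, 0};
  unsigned Reg = 33;                  // hypothetical id: word 1, bit 1
  Mask[Reg / 32] |= 1U << (Reg % 32); // mark live-out
  clearRegBit(Mask, Reg);             // and drop it again
  assert(Mask[1] == 0);
  return 0;
}
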
@@ -765,4 +781,4 @@ unsigned get512BitSuperRegister(unsigned Reg) {
llvm_unreachable("Unexpected SIMD register");
}
-}
+} // namespace llvm
diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h
index a714c2a33d06..459ecf7fff72 100644
--- a/lib/Target/X86/X86RegisterInfo.h
+++ b/lib/Target/X86/X86RegisterInfo.h
@@ -104,6 +104,8 @@ public:
/// register scavenger to determine what registers are free.
BitVector getReservedRegs(const MachineFunction &MF) const override;
+ void adjustStackMapLiveOutMask(uint32_t *Mask) const override;
+
bool hasBasePointer(const MachineFunction &MF) const;
bool canRealignStack(const MachineFunction &MF) const;
@@ -134,6 +136,6 @@ unsigned getX86SubSuperRegister(unsigned, MVT::SimpleValueType, bool High=false)
//get512BitRegister - X86 utility - returns 512-bit super register
unsigned get512BitSuperRegister(unsigned Reg);
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86SelectionDAGInfo.h b/lib/Target/X86/X86SelectionDAGInfo.h
index eb7e0ed9de6c..25606d3f5df3 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.h
+++ b/lib/Target/X86/X86SelectionDAGInfo.h
@@ -48,6 +48,6 @@ public:
MachinePointerInfo SrcPtrInfo) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index 74af29f4566c..3b25d30dc221 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -287,7 +287,7 @@ X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
return *this;
}
-X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
+X86Subtarget::X86Subtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const X86TargetMachine &TM,
unsigned StackAlignOverride)
: X86GenSubtargetInfo(TT, CPU, FS), X86ProcFamily(Others),
@@ -300,8 +300,7 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
TargetTriple.getEnvironment() == Triple::CODE16),
TSInfo(*TM.getDataLayout()),
InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
- FrameLowering(TargetFrameLowering::StackGrowsDown, getStackAlignment(),
- is64Bit() ? -8 : -4) {
+ FrameLowering(*this, getStackAlignment()) {
// Determine the PICStyle based on the target selected.
if (TM.getRelocationModel() == Reloc::Static) {
// Unless we're in PIC or DynamicNoPIC mode, set the PIC style to None.
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index a476f7aba932..6934061c6922 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -253,9 +253,8 @@ public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
- X86Subtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS, const X86TargetMachine &TM,
- unsigned StackAlignOverride);
+ X86Subtarget(const Triple &TT, const std::string &CPU, const std::string &FS,
+ const X86TargetMachine &TM, unsigned StackAlignOverride);
const X86TargetLowering *getTargetLowering() const override {
return &TLInfo;
@@ -491,6 +490,6 @@ public:
}
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp
index 646cff7c5bdb..3d6eb4f7ce02 100644
--- a/lib/Target/X86/X86TargetMachine.cpp
+++ b/lib/Target/X86/X86TargetMachine.cpp
@@ -24,6 +24,10 @@
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
+static cl::opt<bool> EnableMachineCombinerPass("x86-machine-combiner",
+ cl::desc("Enable the machine combiner pass"),
+ cl::init(true), cl::Hidden);
+
extern "C" void LLVMInitializeX86Target() {
// Register the target.
RegisterTargetMachine<X86TargetMachine> X(TheX86_32Target);
@@ -90,13 +94,14 @@ static std::string computeDataLayout(const Triple &TT) {
/// X86TargetMachine ctor - Create an X86 target.
///
-X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT, StringRef CPU,
- StringRef FS, const TargetOptions &Options,
+X86TargetMachine::X86TargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, computeDataLayout(Triple(TT)), TT, CPU, FS, Options,
- RM, CM, OL),
- TLOF(createTLOF(Triple(getTargetTriple()))),
+ : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options, RM, CM,
+ OL),
+ TLOF(createTLOF(getTargetTriple())),
Subtarget(TT, CPU, FS, *this, Options.StackAlignmentOverride) {
// Windows stack unwinder gets confused when execution flow "falls through"
// after a call to 'noreturn' function.
@@ -213,7 +218,7 @@ bool X86PassConfig::addInstSelector() {
addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));
// For ELF, cleanup any local-dynamic TLS accesses.
- if (Triple(TM->getTargetTriple()).isOSBinFormatELF() &&
+ if (TM->getTargetTriple().isOSBinFormatELF() &&
getOptLevel() != CodeGenOpt::None)
addPass(createCleanupLocalDynamicTLSPass());
@@ -224,12 +229,14 @@ bool X86PassConfig::addInstSelector() {
bool X86PassConfig::addILPOpts() {
addPass(&EarlyIfConverterID);
+ if (EnableMachineCombinerPass)
+ addPass(&MachineCombinerID);
return true;
}
bool X86PassConfig::addPreISel() {
// Only add this pass for 32-bit x86 Windows.
- Triple TT(TM->getTargetTriple());
+ const Triple &TT = TM->getTargetTriple();
if (TT.isOSWindows() && TT.getArch() == Triple::x86)
addPass(createX86WinEHStatePass());
return true;
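
A usage note, assuming standard cl::opt behavior rather than anything stated in this diff: a hidden default-on boolean like x86-machine-combiner can normally be flipped on the llc command line (for example -x86-machine-combiner=false), which makes it easy to bisect a regression to the new pass. The gating pattern itself, with hypothetical names:

#include "llvm/Support/CommandLine.h"
using namespace llvm;

// A hidden, default-on flag that lets users disable a pass for triage.
static cl::opt<bool> EnableNewPass("enable-new-pass",
                                   cl::desc("Enable the new pass"),
                                   cl::init(true), cl::Hidden);

bool shouldRunNewPass() { return EnableNewPass; }
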
diff --git a/lib/Target/X86/X86TargetMachine.h b/lib/Target/X86/X86TargetMachine.h
index c9833ed39e24..be56888b75f4 100644
--- a/lib/Target/X86/X86TargetMachine.h
+++ b/lib/Target/X86/X86TargetMachine.h
@@ -29,8 +29,8 @@ class X86TargetMachine final : public LLVMTargetMachine {
mutable StringMap<std::unique_ptr<X86Subtarget>> SubtargetMap;
public:
- X86TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS,
- const TargetOptions &Options, Reloc::Model RM,
+ X86TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options, Reloc::Model RM,
CodeModel::Model CM, CodeGenOpt::Level OL);
~X86TargetMachine() override;
const X86Subtarget *getSubtargetImpl(const Function &F) const override;
@@ -44,6 +44,6 @@ public:
}
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index bbfeba8b9d8d..13384fab5985 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -153,13 +153,13 @@ unsigned X86TTIImpl::getArithmeticInstrCost(
{ ISD::SHL, MVT::v4i64, 1 },
{ ISD::SRL, MVT::v4i64, 1 },
- { ISD::SHL, MVT::v32i8, 42 }, // cmpeqb sequence.
+ { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence.
{ ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
- { ISD::SRL, MVT::v32i8, 32*10 }, // Scalarized.
+ { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence.
{ ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
- { ISD::SRA, MVT::v32i8, 32*10 }, // Scalarized.
+ { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence.
{ ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
{ ISD::SRA, MVT::v4i64, 4*10 }, // Scalarized.
@@ -253,19 +253,19 @@ unsigned X86TTIImpl::getArithmeticInstrCost(
// to ISel. The cost model must return worst case assumptions because it is
// used for vectorization and we don't want to make vectorized code worse
// than scalar code.
- { ISD::SHL, MVT::v16i8, 30 }, // cmpeqb sequence.
- { ISD::SHL, MVT::v8i16, 8*10 }, // Scalarized.
- { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
+ { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence.
+ { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence.
+ { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
{ ISD::SHL, MVT::v2i64, 2*10 }, // Scalarized.
{ ISD::SHL, MVT::v4i64, 4*10 }, // Scalarized.
- { ISD::SRL, MVT::v16i8, 16*10 }, // Scalarized.
- { ISD::SRL, MVT::v8i16, 8*10 }, // Scalarized.
+ { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
+ { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence.
{ ISD::SRL, MVT::v4i32, 4*10 }, // Scalarized.
{ ISD::SRL, MVT::v2i64, 2*10 }, // Scalarized.
- { ISD::SRA, MVT::v16i8, 16*10 }, // Scalarized.
- { ISD::SRA, MVT::v8i16, 8*10 }, // Scalarized.
+ { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
+ { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence.
{ ISD::SRA, MVT::v4i32, 4*10 }, // Scalarized.
{ ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized.
diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp
index 6925b272b4a5..71ce45b0bc2e 100644
--- a/lib/Target/X86/X86VZeroUpper.cpp
+++ b/lib/Target/X86/X86VZeroUpper.cpp
@@ -86,7 +86,7 @@ namespace {
};
char VZeroUpperInserter::ID = 0;
-}
+} // namespace
FunctionPass *llvm::createX86IssueVZeroUpperPass() {
return new VZeroUpperInserter();
diff --git a/lib/Target/X86/X86WinEHState.cpp b/lib/Target/X86/X86WinEHState.cpp
index ce69ea721993..c9e80945549b 100644
--- a/lib/Target/X86/X86WinEHState.cpp
+++ b/lib/Target/X86/X86WinEHState.cpp
@@ -60,9 +60,10 @@ public:
private:
void emitExceptionRegistrationRecord(Function *F);
- void linkExceptionRegistration(IRBuilder<> &Builder, Value *Handler);
+ void linkExceptionRegistration(IRBuilder<> &Builder, Function *Handler);
void unlinkExceptionRegistration(IRBuilder<> &Builder);
void addCXXStateStores(Function &F, MachineModuleInfo &MMI);
+ void addSEHStateStores(Function &F, MachineModuleInfo &MMI);
void addCXXStateStoresToFunclet(Value *ParentRegNode, WinEHFuncInfo &FuncInfo,
Function &F, int BaseState);
void insertStateNumberStore(Value *ParentRegNode, Instruction *IP, int State);
@@ -104,7 +105,7 @@ private:
/// The linked list node subobject inside of RegNode.
Value *Link = nullptr;
};
-}
+} // namespace
FunctionPass *llvm::createX86WinEHStatePass() { return new WinEHStatePass(); }
@@ -145,16 +146,10 @@ bool WinEHStatePass::runOnFunction(Function &F) {
return false;
// Check the personality. Do nothing if this is not an MSVC personality.
- LandingPadInst *LP = nullptr;
- for (BasicBlock &BB : F) {
- LP = BB.getLandingPadInst();
- if (LP)
- break;
- }
- if (!LP)
+ if (!F.hasPersonalityFn())
return false;
PersonalityFn =
- dyn_cast<Function>(LP->getPersonalityFn()->stripPointerCasts());
+ dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
if (!PersonalityFn)
return false;
Personality = classifyEHPersonality(PersonalityFn);
@@ -171,8 +166,10 @@ bool WinEHStatePass::runOnFunction(Function &F) {
auto *MMIPtr = getAnalysisIfAvailable<MachineModuleInfo>();
assert(MMIPtr && "MachineModuleInfo should always be available");
MachineModuleInfo &MMI = *MMIPtr;
- if (Personality == EHPersonality::MSVC_CXX) {
- addCXXStateStores(F, MMI);
+ switch (Personality) {
+ default: llvm_unreachable("unexpected personality function");
+ case EHPersonality::MSVC_CXX: addCXXStateStores(F, MMI); break;
+ case EHPersonality::MSVC_X86SEH: addSEHStateStores(F, MMI); break;
}
// Reset per-function state.
@@ -258,7 +255,6 @@ void WinEHStatePass::emitExceptionRegistrationRecord(Function *F) {
if (Personality == EHPersonality::MSVC_CXX) {
RegNodeTy = getCXXEHRegistrationType();
RegNode = Builder.CreateAlloca(RegNodeTy);
- // FIXME: We can skip this in -GS- mode, when we figure that out.
// SavedESP = llvm.stacksave()
Value *SP = Builder.CreateCall(
Intrinsic::getDeclaration(TheModule, Intrinsic::stacksave), {});
@@ -360,11 +356,14 @@ Function *WinEHStatePass::generateLSDAInEAXThunk(Function *ParentFunc) {
}
void WinEHStatePass::linkExceptionRegistration(IRBuilder<> &Builder,
- Value *Handler) {
+ Function *Handler) {
+ // Emit the .safeseh directive for this function.
+ Handler->addFnAttr("safeseh");
+
Type *LinkTy = getEHLinkRegistrationType();
// Handler = Handler
- Handler = Builder.CreateBitCast(Handler, Builder.getInt8PtrTy());
- Builder.CreateStore(Handler, Builder.CreateStructGEP(LinkTy, Link, 1));
+ Value *HandlerI8 = Builder.CreateBitCast(Handler, Builder.getInt8PtrTy());
+ Builder.CreateStore(HandlerI8, Builder.CreateStructGEP(LinkTy, Link, 1));
// Next = [fs:00]
Constant *FSZero =
Constant::getNullValue(LinkTy->getPointerTo()->getPointerTo(257));
@@ -472,6 +471,74 @@ void WinEHStatePass::addCXXStateStoresToFunclet(Value *ParentRegNode,
}
}
+/// Assign every distinct landingpad a unique state number for SEH. This very
+/// simple algorithm suffices for SEH, but not for C++ EH, because SEH catch
+/// handlers aren't outlined and the runtime doesn't have to figure out which
+/// catch handler frame to unwind to.
+/// FIXME: __finally blocks are outlined, so this approach may break down there.
+void WinEHStatePass::addSEHStateStores(Function &F, MachineModuleInfo &MMI) {
+ WinEHFuncInfo &FuncInfo = MMI.getWinEHFuncInfo(&F);
+
+ // Remember and return the index that we used. We save it in WinEHFuncInfo so
+ // that we can lower llvm.x86.seh.exceptioninfo later in filter functions
+ // without too much trouble.
+ int RegNodeEscapeIndex = escapeRegNode(F);
+ FuncInfo.EHRegNodeEscapeIndex = RegNodeEscapeIndex;
+
+ // Iterate all the instructions and emit state number stores.
+ int CurState = 0;
+ SmallPtrSet<BasicBlock *, 4> ExceptBlocks;
+ for (BasicBlock &BB : F) {
+ for (auto I = BB.begin(), E = BB.end(); I != E; ++I) {
+ if (auto *CI = dyn_cast<CallInst>(I)) {
+ auto *Intrin = dyn_cast<IntrinsicInst>(CI);
+ if (Intrin) {
+          // Calls that "don't throw" may still raise asynchronous
+          // exceptions, but intrinsics cannot, so skip them.
+ continue;
+ }
+ insertStateNumberStore(RegNode, CI, -1);
+ } else if (auto *II = dyn_cast<InvokeInst>(I)) {
+ // Look up the state number of the landingpad this unwinds to.
+ LandingPadInst *LPI = II->getUnwindDest()->getLandingPadInst();
+ auto InsertionPair =
+ FuncInfo.LandingPadStateMap.insert(std::make_pair(LPI, CurState));
+ auto Iter = InsertionPair.first;
+ int &State = Iter->second;
+ bool Inserted = InsertionPair.second;
+ if (Inserted) {
+ // Each action consumes a state number.
+ auto *EHActions = cast<IntrinsicInst>(LPI->getNextNode());
+ SmallVector<std::unique_ptr<ActionHandler>, 4> ActionList;
+ parseEHActions(EHActions, ActionList);
+ assert(!ActionList.empty());
+ CurState += ActionList.size();
+ State += ActionList.size() - 1;
+
+ // Remember all the __except block targets.
+ for (auto &Handler : ActionList) {
+ if (auto *CH = dyn_cast<CatchHandler>(Handler.get())) {
+ auto *BA = cast<BlockAddress>(CH->getHandlerBlockOrFunc());
+ ExceptBlocks.insert(BA->getBasicBlock());
+ }
+ }
+ }
+ insertStateNumberStore(RegNode, II, State);
+ }
+ }
+ }
+
+ // Insert llvm.stackrestore into each __except block.
+ Function *StackRestore =
+ Intrinsic::getDeclaration(TheModule, Intrinsic::stackrestore);
+ for (BasicBlock *ExceptBB : ExceptBlocks) {
+ IRBuilder<> Builder(ExceptBB->begin());
+ Value *SP =
+ Builder.CreateLoad(Builder.CreateStructGEP(RegNodeTy, RegNode, 0));
+ Builder.CreateCall(StackRestore, {SP});
+ }
+}
+
void WinEHStatePass::insertStateNumberStore(Value *ParentRegNode,
Instruction *IP, int State) {
IRBuilder<> Builder(IP);
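
A toy model of the numbering scheme the comment above describes, simplified to one state per landing pad (the real code lets each parsed EH action consume its own state number). All names and types here are illustrative:

#include <map>
#include <vector>

struct Insn {
  bool IsInvoke;  // true: invoke unwinding to LandingPad; false: plain call
  int LandingPad; // meaningful only when IsInvoke is true
};

// Returns the state stored before each instruction: -1 for plain calls,
// otherwise the state assigned to the invoke's landing pad.
std::vector<int> numberSEHStates(const std::vector<Insn> &Body) {
  std::map<int, int> PadState;
  std::vector<int> Stores;
  int CurState = 0;
  for (const Insn &I : Body) {
    if (!I.IsInvoke) {
      Stores.push_back(-1); // a plain call may fault asynchronously
      continue;
    }
    auto Ins = PadState.insert({I.LandingPad, CurState});
    if (Ins.second)
      ++CurState; // first time this landing pad is seen
    Stores.push_back(Ins.first->second);
  }
  return Stores;
}
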
diff --git a/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp b/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
index 2e44ac949b2c..e1baeacc3e57 100644
--- a/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
+++ b/lib/Target/XCore/Disassembler/XCoreDisassembler.cpp
@@ -40,7 +40,7 @@ public:
raw_ostream &VStream,
raw_ostream &CStream) const override;
};
-}
+} // namespace
static bool readInstruction16(ArrayRef<uint8_t> Bytes, uint64_t Address,
uint64_t &Size, uint16_t &Insn) {
diff --git a/lib/Target/XCore/LLVMBuild.txt b/lib/Target/XCore/LLVMBuild.txt
index 0504e8ab8f0c..401e0526f580 100644
--- a/lib/Target/XCore/LLVMBuild.txt
+++ b/lib/Target/XCore/LLVMBuild.txt
@@ -29,5 +29,17 @@ has_disassembler = 1
type = Library
name = XCoreCodeGen
parent = XCore
-required_libraries = Analysis AsmPrinter CodeGen Core MC SelectionDAG Support Target TransformUtils XCoreAsmPrinter XCoreDesc XCoreInfo
+required_libraries =
+ Analysis
+ AsmPrinter
+ CodeGen
+ Core
+ MC
+ SelectionDAG
+ Support
+ Target
+ TransformUtils
+ XCoreAsmPrinter
+ XCoreDesc
+ XCoreInfo
add_to_library_groups = XCore
diff --git a/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp b/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
index f0e459620c9c..8699ce84006c 100644
--- a/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
+++ b/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
@@ -46,8 +46,8 @@ static MCRegisterInfo *createXCoreMCRegisterInfo(StringRef TT) {
return X;
}
-static MCSubtargetInfo *createXCoreMCSubtargetInfo(StringRef TT, StringRef CPU,
- StringRef FS) {
+static MCSubtargetInfo *
+createXCoreMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
MCSubtargetInfo *X = new MCSubtargetInfo();
InitXCoreMCSubtargetInfo(X, TT, CPU, FS);
return X;
@@ -123,7 +123,7 @@ void XCoreTargetAsmStreamer::emitCCBottomData(StringRef Name) {
void XCoreTargetAsmStreamer::emitCCBottomFunction(StringRef Name) {
OS << "\t.cc_bottom " << Name << ".function\n";
}
-}
+} // namespace
static MCTargetStreamer *createTargetAsmStreamer(MCStreamer &S,
formatted_raw_ostream &OS,
diff --git a/lib/Target/XCore/XCore.h b/lib/Target/XCore/XCore.h
index ba6ca843671e..eb8b5ec0b112 100644
--- a/lib/Target/XCore/XCore.h
+++ b/lib/Target/XCore/XCore.h
@@ -32,6 +32,6 @@ namespace llvm {
CodeGenOpt::Level OptLevel);
ModulePass *createXCoreLowerThreadLocalPass();
-} // end namespace llvm;
+} // namespace llvm
#endif
diff --git a/lib/Target/XCore/XCoreFrameLowering.h b/lib/Target/XCore/XCoreFrameLowering.h
index 607c77248952..116e89a60ee4 100644
--- a/lib/Target/XCore/XCoreFrameLowering.h
+++ b/lib/Target/XCore/XCoreFrameLowering.h
@@ -58,6 +58,6 @@ namespace llvm {
return 4;
}
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp b/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp
index 77292c4f8f52..8d96105a2ebc 100644
--- a/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp
+++ b/lib/Target/XCore/XCoreFrameToArgsOffsetElim.cpp
@@ -34,7 +34,7 @@ namespace {
}
};
char XCoreFTAOElim::ID = 0;
-}
+} // namespace
/// createXCoreFrameToArgsOffsetEliminationPass - returns an instance of the
/// Frame to args offset elimination pass
diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h
index 97f0494b6fe3..9c49a8d0dbaa 100644
--- a/lib/Target/XCore/XCoreISelLowering.h
+++ b/lib/Target/XCore/XCoreISelLowering.h
@@ -85,7 +85,7 @@ namespace llvm {
// Memory barrier.
MEMBARRIER
};
- }
+ } // namespace XCoreISD
//===--------------------------------------------------------------------===//
// TargetLowering Implementation
@@ -215,6 +215,6 @@ namespace llvm {
const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
LLVMContext &Context) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/XCore/XCoreInstrInfo.cpp b/lib/Target/XCore/XCoreInstrInfo.cpp
index c310aa3a179f..a6e974e2e622 100644
--- a/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -41,7 +41,7 @@ namespace XCore {
COND_INVALID
};
}
-}
+} // namespace llvm
// Pin the vtable to this file.
void XCoreInstrInfo::anchor() {}
@@ -281,7 +281,7 @@ XCoreInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
unsigned
XCoreInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ ArrayRef<MachineOperand> Cond,
DebugLoc DL)const{
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
diff --git a/lib/Target/XCore/XCoreInstrInfo.h b/lib/Target/XCore/XCoreInstrInfo.h
index 60bb3f8c39af..70beb4179118 100644
--- a/lib/Target/XCore/XCoreInstrInfo.h
+++ b/lib/Target/XCore/XCoreInstrInfo.h
@@ -56,8 +56,7 @@ public:
bool AllowModify) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
DebugLoc DL) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
@@ -89,6 +88,6 @@ public:
unsigned Reg, uint64_t Value) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
index 996c6f59346d..f866ab063396 100644
--- a/lib/Target/XCore/XCoreLowerThreadLocal.cpp
+++ b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
@@ -50,7 +50,7 @@ namespace {
bool runOnModule(Module &M) override;
};
-}
+} // namespace
char XCoreLowerThreadLocal::ID = 0;
diff --git a/lib/Target/XCore/XCoreMCInstLower.h b/lib/Target/XCore/XCoreMCInstLower.h
index 569147872f23..74a7f20570e8 100644
--- a/lib/Target/XCore/XCoreMCInstLower.h
+++ b/lib/Target/XCore/XCoreMCInstLower.h
@@ -37,6 +37,6 @@ private:
MCOperand LowerSymbolOperand(const MachineOperand &MO,
MachineOperandType MOTy, unsigned Offset) const;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/XCore/XCoreMachineFunctionInfo.h b/lib/Target/XCore/XCoreMachineFunctionInfo.h
index 078ffde18fb9..8cce75fd0a73 100644
--- a/lib/Target/XCore/XCoreMachineFunctionInfo.h
+++ b/lib/Target/XCore/XCoreMachineFunctionInfo.h
@@ -101,6 +101,6 @@ public:
return SpillLabels;
}
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/XCore/XCoreSelectionDAGInfo.h b/lib/Target/XCore/XCoreSelectionDAGInfo.h
index cfd80b3f3172..622484374a42 100644
--- a/lib/Target/XCore/XCoreSelectionDAGInfo.h
+++ b/lib/Target/XCore/XCoreSelectionDAGInfo.h
@@ -35,6 +35,6 @@ public:
MachinePointerInfo SrcPtrInfo) const override;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/XCore/XCoreSubtarget.cpp b/lib/Target/XCore/XCoreSubtarget.cpp
index 79960207a45a..c98518b60225 100644
--- a/lib/Target/XCore/XCoreSubtarget.cpp
+++ b/lib/Target/XCore/XCoreSubtarget.cpp
@@ -25,7 +25,7 @@ using namespace llvm;
void XCoreSubtarget::anchor() { }
-XCoreSubtarget::XCoreSubtarget(const std::string &TT, const std::string &CPU,
+XCoreSubtarget::XCoreSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM)
: XCoreGenSubtargetInfo(TT, CPU, FS), InstrInfo(), FrameLowering(*this),
TLInfo(TM, *this), TSInfo(*TM.getDataLayout()) {}
diff --git a/lib/Target/XCore/XCoreSubtarget.h b/lib/Target/XCore/XCoreSubtarget.h
index da51ef1c7a81..74ee594e9c5a 100644
--- a/lib/Target/XCore/XCoreSubtarget.h
+++ b/lib/Target/XCore/XCoreSubtarget.h
@@ -40,9 +40,9 @@ public:
/// This constructor initializes the data members to match that
/// of the specified triple.
///
- XCoreSubtarget(const std::string &TT, const std::string &CPU,
+ XCoreSubtarget(const Triple &TT, const std::string &CPU,
const std::string &FS, const TargetMachine &TM);
-
+
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
@@ -61,6 +61,6 @@ public:
return &InstrInfo.getRegisterInfo();
}
};
-} // End llvm namespace
+} // namespace llvm
#endif
diff --git a/lib/Target/XCore/XCoreTargetMachine.cpp b/lib/Target/XCore/XCoreTargetMachine.cpp
index 228dc1c9db57..370b64b26688 100644
--- a/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -22,7 +22,7 @@ using namespace llvm;
/// XCoreTargetMachine ctor - Create an ILP32 architecture model
///
-XCoreTargetMachine::XCoreTargetMachine(const Target &T, StringRef TT,
+XCoreTargetMachine::XCoreTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
diff --git a/lib/Target/XCore/XCoreTargetMachine.h b/lib/Target/XCore/XCoreTargetMachine.h
index 0d324ab1e728..a8addfc3e429 100644
--- a/lib/Target/XCore/XCoreTargetMachine.h
+++ b/lib/Target/XCore/XCoreTargetMachine.h
@@ -23,8 +23,8 @@ class XCoreTargetMachine : public LLVMTargetMachine {
std::unique_ptr<TargetLoweringObjectFile> TLOF;
XCoreSubtarget Subtarget;
public:
- XCoreTargetMachine(const Target &T, StringRef TT,
- StringRef CPU, StringRef FS, const TargetOptions &Options,
+ XCoreTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
~XCoreTargetMachine() override;
diff --git a/lib/Target/XCore/XCoreTargetStreamer.h b/lib/Target/XCore/XCoreTargetStreamer.h
index 3563dbc5cb7b..a82702fc99fc 100644
--- a/lib/Target/XCore/XCoreTargetStreamer.h
+++ b/lib/Target/XCore/XCoreTargetStreamer.h
@@ -22,6 +22,6 @@ public:
virtual void emitCCBottomData(StringRef Name) = 0;
virtual void emitCCBottomFunction(StringRef Name) = 0;
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Transforms/Hello/CMakeLists.txt b/lib/Transforms/Hello/CMakeLists.txt
index 3851b35871f5..e0b81907c7fb 100644
--- a/lib/Transforms/Hello/CMakeLists.txt
+++ b/lib/Transforms/Hello/CMakeLists.txt
@@ -12,4 +12,7 @@ endif()
add_llvm_loadable_module( LLVMHello
Hello.cpp
+
+ DEPENDS
+ intrinsics_gen
)
diff --git a/lib/Transforms/Hello/Hello.cpp b/lib/Transforms/Hello/Hello.cpp
index 29b9bb8a94ea..f90aafc75c22 100644
--- a/lib/Transforms/Hello/Hello.cpp
+++ b/lib/Transforms/Hello/Hello.cpp
@@ -35,7 +35,7 @@ namespace {
return false;
}
};
-}
+} // namespace
char Hello::ID = 0;
static RegisterPass<Hello> X("hello", "Hello World Pass");
@@ -58,7 +58,7 @@ namespace {
AU.setPreservesAll();
}
};
-}
+} // namespace
char Hello2::ID = 0;
static RegisterPass<Hello2>
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index c7c57ab56444..86b3faa09b9c 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -92,7 +92,7 @@ namespace {
unsigned maxElements;
DenseMap<const Function *, DISubprogram *> FunctionDIs;
};
-}
+} // namespace
char ArgPromotion::ID = 0;
INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
@@ -245,6 +245,24 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
Argument *PtrArg = PointerArgs[i];
Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();
+ // Replace sret attribute with noalias. This reduces register pressure by
+ // avoiding a register copy.
+ if (PtrArg->hasStructRetAttr()) {
+ unsigned ArgNo = PtrArg->getArgNo();
+ F->setAttributes(
+ F->getAttributes()
+ .removeAttribute(F->getContext(), ArgNo + 1, Attribute::StructRet)
+ .addAttribute(F->getContext(), ArgNo + 1, Attribute::NoAlias));
+ for (Use &U : F->uses()) {
+ CallSite CS(U.getUser());
+ CS.setAttributes(
+ CS.getAttributes()
+ .removeAttribute(F->getContext(), ArgNo + 1,
+ Attribute::StructRet)
+ .addAttribute(F->getContext(), ArgNo + 1, Attribute::NoAlias));
+ }
+ }
+
// If this is a byval argument, and if the aggregate type is small, just
// pass the elements, which is always safe, if the passed value is densely
// packed or if we can prove the padding bytes are never accessed. This does
@@ -553,7 +571,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg,
LoadInst *Load = Loads[i];
BasicBlock *BB = Load->getParent();
- AliasAnalysis::Location Loc = MemoryLocation::get(Load);
+ MemoryLocation Loc = MemoryLocation::get(Load);
if (AA.canInstructionRangeModRef(BB->front(), *Load, Loc,
AliasAnalysis::Mod))
return false; // Pointer is invalidated!
diff --git a/lib/Transforms/IPO/BarrierNoopPass.cpp b/lib/Transforms/IPO/BarrierNoopPass.cpp
index 6af104362594..7585fdced020 100644
--- a/lib/Transforms/IPO/BarrierNoopPass.cpp
+++ b/lib/Transforms/IPO/BarrierNoopPass.cpp
@@ -38,7 +38,7 @@ public:
bool runOnModule(Module &M) override { return false; }
};
-}
+} // namespace
ModulePass *llvm::createBarrierNoopPass() { return new BarrierNoop(); }
diff --git a/lib/Transforms/IPO/ConstantMerge.cpp b/lib/Transforms/IPO/ConstantMerge.cpp
index 8ce7646621ff..3b68743920aa 100644
--- a/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/lib/Transforms/IPO/ConstantMerge.cpp
@@ -53,7 +53,7 @@ namespace {
unsigned getAlignment(GlobalVariable *GV) const;
};
-}
+} // namespace
char ConstantMerge::ID = 0;
INITIALIZE_PASS(ConstantMerge, "constmerge",
diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 76898f275058..6bfd3d149316 100644
--- a/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -159,7 +159,7 @@ namespace {
bool DeleteDeadVarargs(Function &Fn);
bool RemoveDeadArgumentsFromCallers(Function &Fn);
};
-}
+} // namespace
char DAE::ID = 0;
@@ -175,7 +175,7 @@ namespace {
bool ShouldHackArguments() const override { return true; }
};
-}
+} // namespace
char DAH::ID = 0;
INITIALIZE_PASS(DAH, "deadarghaX0r",
diff --git a/lib/Transforms/IPO/ExtractGV.cpp b/lib/Transforms/IPO/ExtractGV.cpp
index 2f8c7d9349b9..7e0dddc15d10 100644
--- a/lib/Transforms/IPO/ExtractGV.cpp
+++ b/lib/Transforms/IPO/ExtractGV.cpp
@@ -146,7 +146,7 @@ namespace {
};
char GVExtractorPass::ID = 0;
-}
+} // namespace
ModulePass *llvm::createGVExtractionPass(std::vector<GlobalValue *> &GVs,
bool deleteFn) {
diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp
index ef8f42ffd6d4..749ff9920a82 100644
--- a/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -132,7 +132,7 @@ namespace {
AliasAnalysis *AA;
TargetLibraryInfo *TLI;
};
-}
+} // namespace
char FunctionAttrs::ID = 0;
INITIALIZE_PASS_BEGIN(FunctionAttrs, "functionattrs",
@@ -208,8 +208,7 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
AAMDNodes AAInfo;
I->getAAMetadata(AAInfo);
- AliasAnalysis::Location Loc(Arg,
- AliasAnalysis::UnknownSize, AAInfo);
+ MemoryLocation Loc(Arg, MemoryLocation::UnknownSize, AAInfo);
if (!AA->pointsToConstantMemory(Loc, /*OrLocal=*/true)) {
if (MRB & AliasAnalysis::Mod)
// Writes non-local memory. Give up.
@@ -232,20 +231,20 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
} else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
// Ignore non-volatile loads from local memory. (Atomic is okay here.)
if (!LI->isVolatile()) {
- AliasAnalysis::Location Loc = MemoryLocation::get(LI);
+ MemoryLocation Loc = MemoryLocation::get(LI);
if (AA->pointsToConstantMemory(Loc, /*OrLocal=*/true))
continue;
}
} else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
// Ignore non-volatile stores to local memory. (Atomic is okay here.)
if (!SI->isVolatile()) {
- AliasAnalysis::Location Loc = MemoryLocation::get(SI);
+ MemoryLocation Loc = MemoryLocation::get(SI);
if (AA->pointsToConstantMemory(Loc, /*OrLocal=*/true))
continue;
}
} else if (VAArgInst *VI = dyn_cast<VAArgInst>(I)) {
// Ignore vaargs on local memory.
- AliasAnalysis::Location Loc = MemoryLocation::get(VI);
+ MemoryLocation Loc = MemoryLocation::get(VI);
if (AA->pointsToConstantMemory(Loc, /*OrLocal=*/true))
continue;
}
@@ -380,7 +379,7 @@ namespace {
const SmallPtrSet<Function*, 8> &SCCNodes;
};
-}
+} // namespace
namespace llvm {
template<> struct GraphTraits<ArgumentGraphNode*> {
@@ -407,7 +406,7 @@ namespace llvm {
return AG->end();
}
};
-}
+} // namespace llvm
// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
static Attribute::AttrKind
diff --git a/lib/Transforms/IPO/GlobalDCE.cpp b/lib/Transforms/IPO/GlobalDCE.cpp
index ba04c80508c4..7983104dba94 100644
--- a/lib/Transforms/IPO/GlobalDCE.cpp
+++ b/lib/Transforms/IPO/GlobalDCE.cpp
@@ -57,7 +57,7 @@ namespace {
bool RemoveUnusedGlobalValue(GlobalValue &GV);
};
-}
+} // namespace
/// Returns true if F contains only a single "ret" instruction.
static bool isEmptyFunction(Function *F) {
@@ -228,6 +228,9 @@ void GlobalDCE::GlobalIsNeeded(GlobalValue *G) {
if (F->hasPrologueData())
MarkUsedGlobalsAsNeeded(F->getPrologueData());
+ if (F->hasPersonalityFn())
+ MarkUsedGlobalsAsNeeded(F->getPersonalityFn());
+
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
for (User::op_iterator U = I->op_begin(), E = I->op_end(); U != E; ++U)
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index cc4a79fa67de..0d83c820aa07 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -89,7 +89,7 @@ namespace {
TargetLibraryInfo *TLI;
SmallSet<const Comdat *, 8> NotDiscardableComdats;
};
-}
+} // namespace
char GlobalOpt::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
@@ -2786,7 +2786,7 @@ public:
setUsedInitializer(*CompilerUsedV, CompilerUsed);
}
};
-}
+} // namespace
static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
if (GA.use_empty()) // No use at all.
diff --git a/lib/Transforms/IPO/IPConstantPropagation.cpp b/lib/Transforms/IPO/IPConstantPropagation.cpp
index af541d155254..d717b25a47c0 100644
--- a/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -45,7 +45,7 @@ namespace {
bool PropagateConstantsIntoArguments(Function &F);
bool PropagateConstantReturn(Function &F);
};
-}
+} // namespace
char IPCP::ID = 0;
INITIALIZE_PASS(IPCP, "ipconstprop",
diff --git a/lib/Transforms/IPO/InlineAlways.cpp b/lib/Transforms/IPO/InlineAlways.cpp
index dc56a02e7b7d..37ff091a49cd 100644
--- a/lib/Transforms/IPO/InlineAlways.cpp
+++ b/lib/Transforms/IPO/InlineAlways.cpp
@@ -62,7 +62,7 @@ public:
}
};
-}
+} // namespace
char AlwaysInliner::ID = 0;
INITIALIZE_PASS_BEGIN(AlwaysInliner, "always-inline",
diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp
index 8f65a983a813..93cdba6f5b58 100644
--- a/lib/Transforms/IPO/Inliner.cpp
+++ b/lib/Transforms/IPO/Inliner.cpp
@@ -93,19 +93,26 @@ static void AdjustCallerSSPLevel(Function *Caller, Function *Callee) {
// clutter to the IR.
AttrBuilder B;
B.addAttribute(Attribute::StackProtect)
- .addAttribute(Attribute::StackProtectStrong);
+ .addAttribute(Attribute::StackProtectStrong)
+ .addAttribute(Attribute::StackProtectReq);
AttributeSet OldSSPAttr = AttributeSet::get(Caller->getContext(),
AttributeSet::FunctionIndex,
B);
- if (Callee->hasFnAttribute(Attribute::StackProtectReq)) {
+ if (Callee->hasFnAttribute(Attribute::SafeStack)) {
+ Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
+ Caller->addFnAttr(Attribute::SafeStack);
+ } else if (Callee->hasFnAttribute(Attribute::StackProtectReq) &&
+ !Caller->hasFnAttribute(Attribute::SafeStack)) {
Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
Caller->addFnAttr(Attribute::StackProtectReq);
} else if (Callee->hasFnAttribute(Attribute::StackProtectStrong) &&
+ !Caller->hasFnAttribute(Attribute::SafeStack) &&
!Caller->hasFnAttribute(Attribute::StackProtectReq)) {
Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
Caller->addFnAttr(Attribute::StackProtectStrong);
} else if (Callee->hasFnAttribute(Attribute::StackProtect) &&
+ !Caller->hasFnAttribute(Attribute::SafeStack) &&
!Caller->hasFnAttribute(Attribute::StackProtectReq) &&
!Caller->hasFnAttribute(Attribute::StackProtectStrong))
Caller->addFnAttr(Attribute::StackProtect);
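
Read as a whole, the chain above enforces a precedence order, strongest first: SafeStack, StackProtectReq, StackProtectStrong, StackProtect; after inlining, the caller adopts the stronger of the two levels. A reduced model of that merge, with a hypothetical enum rather than the LLVM attribute API:

#include <algorithm>

// Ordered weakest to strongest, so std::max picks the stronger level.
enum class SSPLevel { None, Protect, Strong, Req, SafeStack };

SSPLevel mergeAfterInline(SSPLevel Caller, SSPLevel Callee) {
  // The caller ends up with at least the callee's protection level.
  return std::max(Caller, Callee);
}
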
@@ -431,8 +438,8 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
SmallPtrSet<Function*, 8> SCCFunctions;
DEBUG(dbgs() << "Inliner visiting SCC:");
- for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
- Function *F = (*I)->getFunction();
+ for (CallGraphNode *Node : SCC) {
+ Function *F = Node->getFunction();
if (F) SCCFunctions.insert(F);
DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
}
@@ -448,13 +455,13 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// index into the InlineHistory vector.
SmallVector<std::pair<Function*, int>, 8> InlineHistory;
- for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
- Function *F = (*I)->getFunction();
+ for (CallGraphNode *Node : SCC) {
+ Function *F = Node->getFunction();
if (!F) continue;
- for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- CallSite CS(cast<Value>(I));
+ for (BasicBlock &BB : *F)
+ for (Instruction &I : BB) {
+ CallSite CS(cast<Value>(&I));
// If this isn't a call, or it is a call to an intrinsic, it can
// never be inlined.
if (!CS || isa<IntrinsicInst>(I))
@@ -496,6 +503,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
LocalChange = false;
// Iterate over the outer loop because inlining functions can cause indirect
// calls to become direct calls.
+    // CallSites may be modified inside the loop, so a ranged-for loop
+    // cannot be used here.
for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
CallSite CS = CallSites[CSi].first;
@@ -566,11 +574,8 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
int NewHistoryID = InlineHistory.size();
InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));
- for (unsigned i = 0, e = InlineInfo.InlinedCalls.size();
- i != e; ++i) {
- Value *Ptr = InlineInfo.InlinedCalls[i];
+ for (Value *Ptr : InlineInfo.InlinedCalls)
CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
- }
}
}
diff --git a/lib/Transforms/IPO/LoopExtractor.cpp b/lib/Transforms/IPO/LoopExtractor.cpp
index 41334ca5b429..ada4a76bf3ba 100644
--- a/lib/Transforms/IPO/LoopExtractor.cpp
+++ b/lib/Transforms/IPO/LoopExtractor.cpp
@@ -51,7 +51,7 @@ namespace {
AU.addRequired<DominatorTreeWrapperPass>();
}
};
-}
+} // namespace
char LoopExtractor::ID = 0;
INITIALIZE_PASS_BEGIN(LoopExtractor, "loop-extract",
@@ -183,7 +183,7 @@ namespace {
bool runOnModule(Module &M) override;
};
-}
+} // namespace
char BlockExtractorPass::ID = 0;
INITIALIZE_PASS(BlockExtractorPass, "extract-blocks",
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 052f1b4b1325..5e41798ad8d4 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -409,7 +409,7 @@ public:
return (FunctionComparator(F, RHS.getFunc()).compare()) == -1;
}
};
-}
+} // namespace
int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
if (L < R) return -1;
@@ -1397,28 +1397,26 @@ void MergeFunctions::mergeTwoFunctions(Function *F, Function *G) {
if (F->mayBeOverridden()) {
assert(G->mayBeOverridden());
- if (HasGlobalAliases) {
- // Make them both thunks to the same internal function.
- Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
- F->getParent());
- H->copyAttributesFrom(F);
- H->takeName(F);
- removeUsers(F);
- F->replaceAllUsesWith(H);
+ // Make them both thunks to the same internal function.
+ Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
+ F->getParent());
+ H->copyAttributesFrom(F);
+ H->takeName(F);
+ removeUsers(F);
+ F->replaceAllUsesWith(H);
- unsigned MaxAlignment = std::max(G->getAlignment(), H->getAlignment());
+ unsigned MaxAlignment = std::max(G->getAlignment(), H->getAlignment());
+ if (HasGlobalAliases) {
writeAlias(F, G);
writeAlias(F, H);
-
- F->setAlignment(MaxAlignment);
- F->setLinkage(GlobalValue::PrivateLinkage);
} else {
- // We can't merge them. Instead, pick one and update all direct callers
- // to call it and hope that we improve the instruction cache hit rate.
- replaceDirectCallers(G, F);
+ writeThunk(F, G);
+ writeThunk(F, H);
}
+ F->setAlignment(MaxAlignment);
+ F->setLinkage(GlobalValue::PrivateLinkage);
++NumDoubleWeak;
} else {
writeThunkOrAlias(F, G);
diff --git a/lib/Transforms/IPO/PartialInlining.cpp b/lib/Transforms/IPO/PartialInlining.cpp
index 4a7cb7ba7d12..7a7065c30ab1 100644
--- a/lib/Transforms/IPO/PartialInlining.cpp
+++ b/lib/Transforms/IPO/PartialInlining.cpp
@@ -40,7 +40,7 @@ namespace {
private:
Function* unswitchFunction(Function* F);
};
-}
+} // namespace
char PartialInliner::ID = 0;
INITIALIZE_PASS(PartialInliner, "partial-inliner",
diff --git a/lib/Transforms/IPO/PassManagerBuilder.cpp b/lib/Transforms/IPO/PassManagerBuilder.cpp
index 3496a663f53b..963f1bb13aaf 100644
--- a/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -94,7 +94,6 @@ PassManagerBuilder::PassManagerBuilder() {
SizeLevel = 0;
LibraryInfo = nullptr;
Inliner = nullptr;
- DisableTailCalls = false;
DisableUnitAtATime = false;
DisableUnrollLoops = false;
BBVectorize = RunBBVectorization;
@@ -238,8 +237,7 @@ void PassManagerBuilder::populateModulePassManager(
MPM.add(createInstructionCombiningPass()); // Combine silly seq's
addExtensionsToPM(EP_Peephole, MPM);
- if (!DisableTailCalls)
- MPM.add(createTailCallEliminationPass()); // Eliminate tail calls
+ MPM.add(createTailCallEliminationPass()); // Eliminate tail calls
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
MPM.add(createReassociatePass()); // Reassociate expressions
// Rotate Loop - disable header duplication at -Oz
diff --git a/lib/Transforms/IPO/PruneEH.cpp b/lib/Transforms/IPO/PruneEH.cpp
index 1943b930cbf9..a5ba9eed6345 100644
--- a/lib/Transforms/IPO/PruneEH.cpp
+++ b/lib/Transforms/IPO/PruneEH.cpp
@@ -49,7 +49,7 @@ namespace {
bool SimplifyFunction(Function *F);
void DeleteBasicBlock(BasicBlock *BB);
};
-}
+} // namespace
char PruneEH::ID = 0;
INITIALIZE_PASS_BEGIN(PruneEH, "prune-eh",
@@ -177,7 +177,7 @@ bool PruneEH::SimplifyFunction(Function *F) {
bool MadeChange = false;
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator()))
- if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(II)) {
+ if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(F)) {
SmallVector<Value*, 8> Args(II->op_begin(), II->op_end() - 3);
// Insert a call instruction before the invoke.
CallInst *Call = CallInst::Create(II->getCalledValue(), Args, "", II);
diff --git a/lib/Transforms/IPO/StripSymbols.cpp b/lib/Transforms/IPO/StripSymbols.cpp
index 60c957347621..6f9af1dea200 100644
--- a/lib/Transforms/IPO/StripSymbols.cpp
+++ b/lib/Transforms/IPO/StripSymbols.cpp
@@ -95,7 +95,7 @@ namespace {
AU.setPreservesAll();
}
};
-}
+} // namespace
char StripSymbols::ID = 0;
INITIALIZE_PASS(StripSymbols, "strip",
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index a8d017255178..29ecc1d0b0a1 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -193,7 +193,7 @@ namespace {
void incCreateInstNum() {}
#endif
};
-}
+} // namespace
//===----------------------------------------------------------------------===//
//
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index e83b9dd36ae8..6de380bcad67 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1391,11 +1391,29 @@ static IntrinsicInst *FindInitTrampoline(Value *Callee) {
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
+
if (isAllocLikeFn(CS.getInstruction(), TLI))
return visitAllocSite(*CS.getInstruction());
bool Changed = false;
+ // Mark any parameters that are known to be non-null with the nonnull
+ // attribute. This is helpful for inlining calls to functions with null
+ // checks on their arguments.
+ unsigned ArgNo = 0;
+ for (Value *V : CS.args()) {
+ if (!CS.paramHasAttr(ArgNo+1, Attribute::NonNull) &&
+ isKnownNonNull(V)) {
+ AttributeSet AS = CS.getAttributes();
+ AS = AS.addAttribute(CS.getInstruction()->getContext(), ArgNo+1,
+ Attribute::NonNull);
+ CS.setAttributes(AS);
+ Changed = true;
+ }
+ ArgNo++;
+ }
+ assert(ArgNo == CS.arg_size() && "sanity check");
+
// If the callee is a pointer to a function, attempt to move any casts to the
// arguments of the call/invoke.
Value *Callee = CS.getCalledValue();
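
One easy-to-miss detail in the nonnull hunk above: in this vintage of the AttributeSet API, index 0 refers to the return value and parameter attributes are 1-based, hence the ArgNo + 1. Expressed as a hypothetical helper, not an LLVM API:

// Index convention assumed above: 0 = return value, 1..N = parameters.
unsigned paramAttrIndex(unsigned ArgNo) { return ArgNo + 1; }
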
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index a554e9f628e0..6b384b4a9f7a 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -948,7 +948,7 @@ struct UDivFoldAction {
UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
: FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};
-}
+} // namespace
// X udiv 2^C -> X >> C
static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 6a6693cc4e1d..a93ffbec324e 100644
--- a/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -582,7 +582,7 @@ struct LoweredPHIRecord {
LoweredPHIRecord(PHINode *pn, unsigned Sh)
: PN(pn), Shift(Sh), Width(0) {}
};
-}
+} // namespace
namespace llvm {
template<>
@@ -603,7 +603,7 @@ namespace llvm {
LHS.Width == RHS.Width;
}
};
-}
+} // namespace llvm
/// SliceUpIllegalIntegerPHI - This is an integer PHI and we know that it has an
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 9d602c6a9e22..53950ae7e2a4 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2353,7 +2353,8 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
// The logic here should be correct for any real-world personality function.
// However if that turns out not to be true, the offending logic can always
// be conditioned on the personality function, like the catch-all logic is.
- EHPersonality Personality = classifyEHPersonality(LI.getPersonalityFn());
+ EHPersonality Personality =
+ classifyEHPersonality(LI.getParent()->getParent()->getPersonalityFn());
// Simplify the list of clauses, eg by removing repeated catch clauses
// (these are often created by inlining).
@@ -2620,7 +2621,6 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
// with a new one.
if (MakeNewInstruction) {
LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
- LI.getPersonalityFn(),
NewClauses.size());
for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
NLI->addClause(NewClauses[i]);
@@ -2691,7 +2691,8 @@ bool InstCombiner::run() {
}
// Instruction isn't dead, see if we can constant propagate it.
- if (!I->use_empty() && isa<Constant>(I->getOperand(0))) {
+ if (!I->use_empty() &&
+ (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
@@ -2846,7 +2847,8 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
}
// ConstantProp instruction if trivially constant.
- if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
+ if (!Inst->use_empty() &&
+ (Inst->getNumOperands() == 0 || isa<Constant>(Inst->getOperand(0))))
if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "
<< *Inst << '\n');
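The new guard, restated in isolation: operand 0 may only be queried when an operand exists, plausibly needed because this same import moves the personality function off LandingPadInst (see the visitLandingPadInst hunk above), leaving clause-free landingpads with zero operands. A hypothetical helper capturing the condition:

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/Casting.h"
// Fold only when the instruction is used and either has no operands or has
// a constant first operand (a cheap screen before ConstantFoldInstruction).
static bool worthConstantFolding(const llvm::Instruction *I) {
  return !I->use_empty() &&
         (I->getNumOperands() == 0 ||
          llvm::isa<llvm::Constant>(I->getOperand(0)));
}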
@@ -3044,7 +3046,7 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
};
-}
+} // namespace
void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 25f78b0b2a26..2dd2fe6211c3 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -67,6 +67,7 @@ static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000; // < 2G.
+static const uint64_t kLinuxKasan_ShadowOffset64 = 0xdffffc0000000000;
static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37;
@@ -106,10 +107,8 @@ static const char *const kAsanUnpoisonStackMemoryName =
static const char *const kAsanOptionDetectUAR =
"__asan_option_detect_stack_use_after_return";
-static const char *const kAsanAllocaPoison =
- "__asan_alloca_poison";
-static const char *const kAsanAllocasUnpoison =
- "__asan_allocas_unpoison";
+static const char *const kAsanAllocaPoison = "__asan_alloca_poison";
+static const char *const kAsanAllocasUnpoison = "__asan_allocas_unpoison";
// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;
@@ -117,6 +116,9 @@ static const size_t kNumberOfAccessSizes = 5;
static const unsigned kAllocaRzSize = 32;
// Command-line flags.
+static cl::opt<bool> ClEnableKasan(
+ "asan-kernel", cl::desc("Enable KernelAddressSanitizer instrumentation"),
+ cl::Hidden, cl::init(false));
// This flag may need to be replaced with -f[no-]asan-reads.
static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
@@ -317,7 +319,8 @@ struct ShadowMapping {
bool OrShadowOffset;
};
-static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
+static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize,
+ bool IsKasan) {
bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
bool IsIOS = TargetTriple.isiOS();
bool IsFreeBSD = TargetTriple.isOSFreeBSD();
@@ -352,9 +355,12 @@ static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
Mapping.Offset = kPPC64_ShadowOffset64;
else if (IsFreeBSD)
Mapping.Offset = kFreeBSD_ShadowOffset64;
- else if (IsLinux && IsX86_64)
- Mapping.Offset = kSmallX86_64ShadowOffset;
- else if (IsMIPS64)
+ else if (IsLinux && IsX86_64) {
+ if (IsKasan)
+ Mapping.Offset = kLinuxKasan_ShadowOffset64;
+ else
+ Mapping.Offset = kSmallX86_64ShadowOffset;
+ } else if (IsMIPS64)
Mapping.Offset = kMIPS64_ShadowOffset64;
else if (IsAArch64)
Mapping.Offset = kAArch64_ShadowOffset64;
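For reference, the mapping parameters chosen here turn an application address into a shadow address roughly as follows (a sketch; shadowAddress is a hypothetical helper, and Scale defaults to 3 as in standard ASan):

#include <cstdint>
// Each shadow byte describes 2^Scale bytes of application memory.
static uint64_t shadowAddress(uint64_t Addr, uint64_t Offset,
                              unsigned Scale = 3,
                              bool OrShadowOffset = false) {
  uint64_t Shifted = Addr >> Scale;
  return OrShadowOffset ? (Shifted | Offset) : (Shifted + Offset);
}
// Userspace x86-64 Linux: shadowAddress(P, kSmallX86_64ShadowOffset)
// KASan:                  shadowAddress(P, kLinuxKasan_ShadowOffset64)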
@@ -383,7 +389,8 @@ static size_t RedzoneSizeForScale(int MappingScale) {
/// AddressSanitizer: instrument the code in module to find memory bugs.
struct AddressSanitizer : public FunctionPass {
- AddressSanitizer() : FunctionPass(ID) {
+ explicit AddressSanitizer(bool CompileKernel = false)
+ : FunctionPass(ID), CompileKernel(CompileKernel || ClEnableKasan) {
initializeAddressSanitizerPass(*PassRegistry::getPassRegistry());
}
const char *getPassName() const override {
@@ -410,8 +417,7 @@ struct AddressSanitizer : public FunctionPass {
/// If it is an interesting memory access, return the PointerOperand
/// and set IsWrite/Alignment. Otherwise return nullptr.
Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
- uint64_t *TypeSize,
- unsigned *Alignment);
+ uint64_t *TypeSize, unsigned *Alignment);
void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
bool UseCalls, const DataLayout &DL);
void instrumentPointerComparisonOrSubtraction(Instruction *I);
@@ -447,11 +453,12 @@ struct AddressSanitizer : public FunctionPass {
LLVMContext *C;
Triple TargetTriple;
int LongSize;
+ bool CompileKernel;
Type *IntptrTy;
ShadowMapping Mapping;
DominatorTree *DT;
- Function *AsanCtorFunction;
- Function *AsanInitFunction;
+ Function *AsanCtorFunction = nullptr;
+ Function *AsanInitFunction = nullptr;
Function *AsanHandleNoReturnFunc;
Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
// This array is indexed by AccessIsWrite, Experiment and log2(AccessSize).
@@ -470,7 +477,8 @@ struct AddressSanitizer : public FunctionPass {
class AddressSanitizerModule : public ModulePass {
public:
- AddressSanitizerModule() : ModulePass(ID) {}
+ explicit AddressSanitizerModule(bool CompileKernel = false)
+ : ModulePass(ID), CompileKernel(CompileKernel || ClEnableKasan) {}
bool runOnModule(Module &M) override;
static char ID; // Pass identification, replacement for typeid
const char *getPassName() const override { return "AddressSanitizerModule"; }
@@ -487,6 +495,7 @@ class AddressSanitizerModule : public ModulePass {
}
GlobalsMetadata GlobalsMD;
+ bool CompileKernel;
Type *IntptrTy;
LLVMContext *C;
Triple TargetTriple;
@@ -588,7 +597,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
Value *SavedStack) {
IRBuilder<> IRB(InstBefore);
IRB.CreateCall(AsanAllocasUnpoisonFunc,
- {IRB.CreateLoad(DynamicAllocaLayout),
+ {IRB.CreateLoad(DynamicAllocaLayout),
IRB.CreatePtrToInt(SavedStack, IntptrTy)});
}
@@ -692,8 +701,8 @@ INITIALIZE_PASS_END(
AddressSanitizer, "asan",
"AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false,
false)
-FunctionPass *llvm::createAddressSanitizerFunctionPass() {
- return new AddressSanitizer();
+FunctionPass *llvm::createAddressSanitizerFunctionPass(bool CompileKernel) {
+ return new AddressSanitizer(CompileKernel);
}
char AddressSanitizerModule::ID = 0;
@@ -702,8 +711,8 @@ INITIALIZE_PASS(
"AddressSanitizer: detects use-after-free and out-of-bounds bugs."
"ModulePass",
false, false)
-ModulePass *llvm::createAddressSanitizerModulePass() {
- return new AddressSanitizerModule();
+ModulePass *llvm::createAddressSanitizerModulePass(bool CompileKernel) {
+ return new AddressSanitizerModule(CompileKernel);
}
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
@@ -1347,16 +1356,18 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
int LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
- Mapping = getShadowMapping(TargetTriple, LongSize);
+ Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel);
initializeCallbacks(M);
bool Changed = false;
- Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
- assert(CtorFunc);
- IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
-
- if (ClGlobals) Changed |= InstrumentGlobals(IRB, M);
+ // TODO(glider): temporarily disabled globals instrumentation for KASan.
+ if (ClGlobals && !CompileKernel) {
+ Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
+ assert(CtorFunc);
+ IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
+ Changed |= InstrumentGlobals(IRB, M);
+ }
return Changed;
}
@@ -1369,38 +1380,44 @@ void AddressSanitizer::initializeCallbacks(Module &M) {
for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
const std::string TypeStr = AccessIsWrite ? "store" : "load";
const std::string ExpStr = Exp ? "exp_" : "";
+ const std::string SuffixStr = CompileKernel ? "N" : "_n";
+ const std::string EndingStr = CompileKernel ? "_noabort" : "";
const Type *ExpType = Exp ? Type::getInt32Ty(*C) : nullptr;
+ // TODO(glider): for KASan builds add _noabort to error reporting
+ // functions and make them actually noabort (remove the UnreachableInst).
AsanErrorCallbackSized[AccessIsWrite][Exp] =
checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- kAsanReportErrorTemplate + ExpStr + TypeStr + "_n",
+ kAsanReportErrorTemplate + ExpStr + TypeStr + SuffixStr,
IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
AsanMemoryAccessCallbackSized[AccessIsWrite][Exp] =
checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N",
+ ClMemoryAccessCallbackPrefix + ExpStr + TypeStr + "N" + EndingStr,
IRB.getVoidTy(), IntptrTy, IntptrTy, ExpType, nullptr));
for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
AccessSizeIndex++) {
const std::string Suffix = TypeStr + itostr(1 << AccessSizeIndex);
AsanErrorCallback[AccessIsWrite][Exp][AccessSizeIndex] =
checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- kAsanReportErrorTemplate + ExpStr + Suffix, IRB.getVoidTy(),
- IntptrTy, ExpType, nullptr));
+ kAsanReportErrorTemplate + ExpStr + Suffix,
+ IRB.getVoidTy(), IntptrTy, ExpType, nullptr));
AsanMemoryAccessCallback[AccessIsWrite][Exp][AccessSizeIndex] =
checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + ExpStr + Suffix, IRB.getVoidTy(),
- IntptrTy, ExpType, nullptr));
+ ClMemoryAccessCallbackPrefix + ExpStr + Suffix + EndingStr,
+ IRB.getVoidTy(), IntptrTy, ExpType, nullptr));
}
}
}
+ const std::string MemIntrinCallbackPrefix =
+ CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
AsanMemmove = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
+ MemIntrinCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
AsanMemcpy = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
+ MemIntrinCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
AsanMemset = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
- ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
+ MemIntrinCallbackPrefix + "memset", IRB.getInt8PtrTy(),
IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr));
AsanHandleNoReturnFunc = checkSanitizerInterfaceFunction(
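The string concatenation above produces callback names along these lines (a sketch assuming the usual "__asan_" prefix; the exact template constants are defined earlier in this file):

#include <string>
// Hypothetical reconstruction of the per-size access callback names.
static std::string accessCallbackName(bool Kernel, bool IsWrite,
                                      unsigned Bytes) {
  std::string Name = "__asan_" + std::string(IsWrite ? "store" : "load") +
                     std::to_string(Bytes);
  if (Kernel)
    Name += "_noabort"; // KASan callbacks report and keep running
  return Name;          // e.g. "__asan_load4" vs. "__asan_load4_noabort"
}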
@@ -1427,14 +1444,14 @@ bool AddressSanitizer::doInitialization(Module &M) {
IntptrTy = Type::getIntNTy(*C, LongSize);
TargetTriple = Triple(M.getTargetTriple());
- std::tie(AsanCtorFunction, AsanInitFunction) =
- createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, kAsanInitName,
- /*InitArgTypes=*/{},
- /*InitArgs=*/{});
-
- Mapping = getShadowMapping(TargetTriple, LongSize);
-
- appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
+ if (!CompileKernel) {
+ std::tie(AsanCtorFunction, AsanInitFunction) =
+ createSanitizerCtorAndInitFunctions(M, kAsanModuleCtorName, kAsanInitName,
+ /*InitArgTypes=*/{},
+ /*InitArgs=*/{});
+ appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
+ }
+ Mapping = getShadowMapping(TargetTriple, LongSize, CompileKernel);
return true;
}
@@ -1516,11 +1533,10 @@ bool AddressSanitizer::runOnFunction(Function &F) {
}
}
- bool UseCalls = false;
- if (ClInstrumentationWithCallsThreshold >= 0 &&
- ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
- UseCalls = true;
-
+ bool UseCalls =
+ CompileKernel ||
+ (ClInstrumentationWithCallsThreshold >= 0 &&
+ ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold);
const TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
const DataLayout &DL = F.getParent()->getDataLayout();
@@ -1706,8 +1722,7 @@ void FunctionStackPoisoner::poisonStack() {
if (ClInstrumentAllocas && DynamicAllocaVec.size() > 0) {
// Handle dynamic allocas.
createDynamicAllocasInitStorage();
- for (auto &AI : DynamicAllocaVec)
- handleDynamicAllocaCall(AI);
+ for (auto &AI : DynamicAllocaVec) handleDynamicAllocaCall(AI);
unpoisonDynamicAllocas();
}
@@ -1736,8 +1751,8 @@ void FunctionStackPoisoner::poisonStack() {
ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
uint64_t LocalStackSize = L.FrameSize;
- bool DoStackMalloc =
- ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
+ bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
+ LocalStackSize <= kMaxStackMallocSize;
// Don't do dynamic alloca in presence of inline asm: too often it makes
// assumptions on which registers are available. Don't do stack malloc in the
// presence of inline asm on 32-bit platforms for the same reason.
@@ -1901,9 +1916,9 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
// For now just insert the call to ASan runtime.
Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
Value *SizeArg = ConstantInt::get(IntptrTy, Size);
- IRB.CreateCall(DoPoison ? AsanPoisonStackMemoryFunc
- : AsanUnpoisonStackMemoryFunc,
- {AddrArg, SizeArg});
+ IRB.CreateCall(
+ DoPoison ? AsanPoisonStackMemoryFunc : AsanUnpoisonStackMemoryFunc,
+ {AddrArg, SizeArg});
}
// Handling llvm.lifetime intrinsics for a given %alloca:
diff --git a/lib/Transforms/Instrumentation/BoundsChecking.cpp b/lib/Transforms/Instrumentation/BoundsChecking.cpp
index f6858034d79e..a8874251ee07 100644
--- a/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -63,7 +63,7 @@ namespace {
void emitBranchToTrap(Value *Cmp = nullptr);
bool instrument(Value *Ptr, Value *Val, const DataLayout &DL);
};
-}
+} // namespace
char BoundsChecking::ID = 0;
INITIALIZE_PASS(BoundsChecking, "bounds-checking", "Run-time bounds checking",
diff --git a/lib/Transforms/Instrumentation/CMakeLists.txt b/lib/Transforms/Instrumentation/CMakeLists.txt
index b2ff03343eb0..9b81f4bb1619 100644
--- a/lib/Transforms/Instrumentation/CMakeLists.txt
+++ b/lib/Transforms/Instrumentation/CMakeLists.txt
@@ -6,6 +6,7 @@ add_llvm_library(LLVMInstrumentation
MemorySanitizer.cpp
Instrumentation.cpp
InstrProfiling.cpp
+ SafeStack.cpp
SanitizerCoverage.cpp
ThreadSanitizer.cpp
diff --git a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 2de6e1afaba9..43091572aeb1 100644
--- a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -346,7 +346,7 @@ class DFSanVisitor : public InstVisitor<DFSanVisitor> {
void visitMemTransferInst(MemTransferInst &I);
};
-}
+} // namespace
char DataFlowSanitizer::ID;
INITIALIZE_PASS(DataFlowSanitizer, "dfsan",
diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 9a3ed5c04efc..43caf1fcb8d0 100644
--- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -139,7 +139,7 @@ namespace {
LLVMContext *Ctx;
SmallVector<std::unique_ptr<GCOVFunction>, 16> Funcs;
};
-}
+} // namespace
char GCOVProfiler::ID = 0;
INITIALIZE_PASS(GCOVProfiler, "insert-gcov-profiling",
@@ -419,7 +419,7 @@ namespace {
DenseMap<BasicBlock *, GCOVBlock> Blocks;
GCOVBlock ReturnBlock;
};
-}
+} // namespace
std::string GCOVProfiler::mangleName(const DICompileUnit *CU,
const char *NewStem) {
diff --git a/lib/Transforms/Instrumentation/Instrumentation.cpp b/lib/Transforms/Instrumentation/Instrumentation.cpp
index a91fc0ec2a48..27505859100b 100644
--- a/lib/Transforms/Instrumentation/Instrumentation.cpp
+++ b/lib/Transforms/Instrumentation/Instrumentation.cpp
@@ -30,6 +30,7 @@ void llvm::initializeInstrumentation(PassRegistry &Registry) {
initializeThreadSanitizerPass(Registry);
initializeSanitizerCoverageModulePass(Registry);
initializeDataFlowSanitizerPass(Registry);
+ initializeSafeStackPass(Registry);
}
/// LLVMInitializeInstrumentation - C binding for
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 100824e59af5..63eee2f7153a 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2022,6 +2022,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Value *CopyOp, *ConvertOp;
switch (I.getNumArgOperands()) {
+ case 3:
+ assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
case 2:
CopyOp = I.getArgOperand(0);
ConvertOp = I.getArgOperand(1);
diff --git a/lib/Transforms/Instrumentation/SafeStack.cpp b/lib/Transforms/Instrumentation/SafeStack.cpp
new file mode 100644
index 000000000000..13c541218313
--- /dev/null
+++ b/lib/Transforms/Instrumentation/SafeStack.cpp
@@ -0,0 +1,608 @@
+//===-- SafeStack.cpp - Safe Stack Insertion ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass splits the stack into the safe stack (kept as-is for LLVM backend)
+// and the unsafe stack (explicitly allocated and managed through the runtime
+// support library).
+//
+// http://clang.llvm.org/docs/SafeStack.html
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_os_ostream.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "safestack"
+
+namespace llvm {
+
+STATISTIC(NumFunctions, "Total number of functions");
+STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
+STATISTIC(NumUnsafeStackRestorePointsFunctions,
+ "Number of functions that use setjmp or exceptions");
+
+STATISTIC(NumAllocas, "Total number of allocas");
+STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
+STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
+STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");
+
+} // namespace llvm
+
+namespace {
+
+/// Check whether a given alloca instruction (AI) should be put on the safe
+/// stack or not. The function analyzes all uses of AI and checks whether it is
+/// only accessed in a memory safe way (as decided statically).
+bool IsSafeStackAlloca(const AllocaInst *AI) {
+ // Go through all uses of this alloca and check whether all accesses to the
+ // allocated object are statically known to be memory safe and, hence, the
+ // object can be placed on the safe stack.
+
+ SmallPtrSet<const Value *, 16> Visited;
+ SmallVector<const Instruction *, 8> WorkList;
+ WorkList.push_back(AI);
+
+ // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
+ while (!WorkList.empty()) {
+ const Instruction *V = WorkList.pop_back_val();
+ for (const Use &UI : V->uses()) {
+ auto I = cast<const Instruction>(UI.getUser());
+ assert(V == UI.get());
+
+ switch (I->getOpcode()) {
+ case Instruction::Load:
+ // Loading from a pointer is safe.
+ break;
+ case Instruction::VAArg:
+ // "va-arg" from a pointer is safe.
+ break;
+ case Instruction::Store:
+ if (V == I->getOperand(0))
+ // Stored the pointer - conservatively assume it may be unsafe.
+ return false;
+ // Storing to the pointee is safe.
+ break;
+
+ case Instruction::GetElementPtr:
+ if (!cast<const GetElementPtrInst>(I)->hasAllConstantIndices())
+ // GEP with non-constant indices can lead to memory errors.
+ // This also applies to inbounds GEPs, as the inbounds attribute
+ // represents an assumption that the address is in bounds, rather than
+ // an assertion that it is.
+ return false;
+
+ // We assume that GEP on static alloca with constant indices is safe;
+ // otherwise, a compiler would detect it and warn during compilation.
+
+ if (!isa<const ConstantInt>(AI->getArraySize()))
+ // However, if the array size itself is not constant, the access
+ // might still be unsafe at runtime.
+ return false;
+
+ /* fallthrough */
+
+ case Instruction::BitCast:
+ case Instruction::IntToPtr:
+ case Instruction::PHI:
+ case Instruction::PtrToInt:
+ case Instruction::Select:
+ // The object can be safe or not, depending on how the result of the
+ // instruction is used.
+ if (Visited.insert(I).second)
+ WorkList.push_back(cast<const Instruction>(I));
+ break;
+
+ case Instruction::Call:
+ case Instruction::Invoke: {
+ // FIXME: add support for memset and memcpy intrinsics.
+ ImmutableCallSite CS(I);
+
+ // LLVM 'nocapture' attribute is only set for arguments whose address
+ // is not stored, passed around, or used in any other non-trivial way.
+ // We assume that passing a pointer to an object as a 'nocapture'
+ // argument is safe.
+ // FIXME: a more precise solution would require an interprocedural
+ // analysis here, which would look at all uses of an argument inside
+ // the function being called.
+ ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
+ for (ImmutableCallSite::arg_iterator A = B; A != E; ++A)
+ if (A->get() == V && !CS.doesNotCapture(A - B))
+ // The parameter is not marked 'nocapture' - unsafe.
+ return false;
+ continue;
+ }
+
+ default:
+ // The object is unsafe if it is used in any other way.
+ return false;
+ }
+ }
+ }
+
+ // All uses of the alloca are safe, we can place it on the safe stack.
+ return true;
+}
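To make the classification concrete, a hypothetical source-level pair (the analysis itself runs on IR, so the C++ is only illustrative):

// 'safe' only loads and stores its slot directly, so it can stay on the
// safe stack; 'unsafe' indexes with a runtime value, which lowers to a GEP
// with a non-constant index, so its buffer moves to the unsafe stack.
void safe() {
  int x = 42;
  x += 1;
  (void)x;
}
void unsafe(int i) {
  int buf[8] = {};
  buf[i & 7] = 1; // non-constant index -> conservatively unsafe
  (void)buf[0];
}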
+
+/// The SafeStack pass splits the stack of each function into the
+/// safe stack, which is only accessed through memory safe dereferences
+/// (as determined statically), and the unsafe stack, which contains all
+/// local variables that are accessed in unsafe ways.
+class SafeStack : public FunctionPass {
+ const DataLayout *DL;
+
+ Type *StackPtrTy;
+ Type *IntPtrTy;
+ Type *Int32Ty;
+ Type *Int8Ty;
+
+ Constant *UnsafeStackPtr;
+
+ /// Unsafe stack alignment. Each stack frame must ensure that the stack is
+ /// aligned to this value. We need to re-align the unsafe stack if the
+ /// alignment of any object on the stack exceeds this value.
+ ///
+ /// 16 seems like a reasonable upper bound on the alignment of objects that we
+ /// might expect to appear on the stack on most common targets.
+ enum { StackAlignment = 16 };
+
+ /// \brief Build a constant representing a pointer to the unsafe stack
+ /// pointer.
+ Constant *getOrCreateUnsafeStackPtr(Module &M);
+
+ /// \brief Find all static allocas, dynamic allocas, return instructions and
+ /// stack restore points (exception unwind blocks and setjmp calls) in the
+ /// given function and append them to the respective vectors.
+ void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
+ SmallVectorImpl<AllocaInst *> &DynamicAllocas,
+ SmallVectorImpl<ReturnInst *> &Returns,
+ SmallVectorImpl<Instruction *> &StackRestorePoints);
+
+ /// \brief Allocate space for all static allocas in \p StaticAllocas,
+ /// replace allocas with pointers into the unsafe stack and generate code to
+ /// restore the stack pointer before all return instructions in \p Returns.
+ ///
+ /// \returns A pointer to the top of the unsafe stack after all unsafe static
+ /// allocas are allocated.
+ Value *moveStaticAllocasToUnsafeStack(Function &F,
+ ArrayRef<AllocaInst *> StaticAllocas,
+ ArrayRef<ReturnInst *> Returns);
+
+ /// \brief Generate code to restore the stack after all stack restore points
+ /// in \p StackRestorePoints.
+ ///
+ /// \returns A local variable in which to maintain the dynamic top of the
+ /// unsafe stack if needed.
+ AllocaInst *
+ createStackRestorePoints(Function &F,
+ ArrayRef<Instruction *> StackRestorePoints,
+ Value *StaticTop, bool NeedDynamicTop);
+
+ /// \brief Replace all allocas in \p DynamicAllocas with code to allocate
+ /// space dynamically on the unsafe stack and store the dynamic unsafe stack
+ /// top to \p DynamicTop if non-null.
+ void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
+ AllocaInst *DynamicTop,
+ ArrayRef<AllocaInst *> DynamicAllocas);
+
+public:
+ static char ID; // Pass identification, replacement for typeid.
+ SafeStack() : FunctionPass(ID), DL(nullptr) {
+ initializeSafeStackPass(*PassRegistry::getPassRegistry());
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<AliasAnalysis>();
+ }
+
+ virtual bool doInitialization(Module &M) {
+ DL = &M.getDataLayout();
+
+ StackPtrTy = Type::getInt8PtrTy(M.getContext());
+ IntPtrTy = DL->getIntPtrType(M.getContext());
+ Int32Ty = Type::getInt32Ty(M.getContext());
+ Int8Ty = Type::getInt8Ty(M.getContext());
+
+ UnsafeStackPtr = getOrCreateUnsafeStackPtr(M);
+
+ return false;
+ }
+
+ bool runOnFunction(Function &F);
+
+}; // class SafeStack
+
+Constant *SafeStack::getOrCreateUnsafeStackPtr(Module &M) {
+ // The unsafe stack pointer is stored in a global variable with a magic name.
+ const char *kUnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
+
+ auto UnsafeStackPtr =
+ dyn_cast_or_null<GlobalVariable>(M.getNamedValue(kUnsafeStackPtrVar));
+
+ if (!UnsafeStackPtr) {
+ // The global variable is not defined yet, define it ourselves.
+ // We use the initial-exec TLS model because we do not support the variable
+ // living anywhere other than in the main executable.
+ UnsafeStackPtr = new GlobalVariable(
+ /*Module=*/M, /*Type=*/StackPtrTy,
+ /*isConstant=*/false, /*Linkage=*/GlobalValue::ExternalLinkage,
+ /*Initializer=*/0, /*Name=*/kUnsafeStackPtrVar,
+ /*InsertBefore=*/nullptr,
+ /*ThreadLocalMode=*/GlobalValue::InitialExecTLSModel);
+ } else {
+ // The variable exists, check its type and attributes.
+ if (UnsafeStackPtr->getValueType() != StackPtrTy) {
+ report_fatal_error(Twine(kUnsafeStackPtrVar) + " must have void* type");
+ }
+
+ if (!UnsafeStackPtr->isThreadLocal()) {
+ report_fatal_error(Twine(kUnsafeStackPtrVar) + " must be thread-local");
+ }
+ }
+
+ return UnsafeStackPtr;
+}
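The runtime side of this contract, sketched (the authoritative definition lives in compiler-rt; the TLS model mirrors the initial-exec choice above):

// Hypothetical declaration matching what the pass expects to link against.
extern "C" __attribute__((tls_model("initial-exec"))) __thread
    void *__safestack_unsafe_stack_ptr;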
+
+void SafeStack::findInsts(Function &F,
+ SmallVectorImpl<AllocaInst *> &StaticAllocas,
+ SmallVectorImpl<AllocaInst *> &DynamicAllocas,
+ SmallVectorImpl<ReturnInst *> &Returns,
+ SmallVectorImpl<Instruction *> &StackRestorePoints) {
+ for (Instruction &I : inst_range(&F)) {
+ if (auto AI = dyn_cast<AllocaInst>(&I)) {
+ ++NumAllocas;
+
+ if (IsSafeStackAlloca(AI))
+ continue;
+
+ if (AI->isStaticAlloca()) {
+ ++NumUnsafeStaticAllocas;
+ StaticAllocas.push_back(AI);
+ } else {
+ ++NumUnsafeDynamicAllocas;
+ DynamicAllocas.push_back(AI);
+ }
+ } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
+ Returns.push_back(RI);
+ } else if (auto CI = dyn_cast<CallInst>(&I)) {
+ // setjmps require stack restore.
+ if (CI->getCalledFunction() && CI->canReturnTwice())
+ StackRestorePoints.push_back(CI);
+ } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
+ // Exception landing pads require stack restore.
+ StackRestorePoints.push_back(LP);
+ } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
+ if (II->getIntrinsicID() == Intrinsic::gcroot)
+ llvm::report_fatal_error(
+ "gcroot intrinsic not compatible with safestack attribute");
+ }
+ }
+}
+
+AllocaInst *
+SafeStack::createStackRestorePoints(Function &F,
+ ArrayRef<Instruction *> StackRestorePoints,
+ Value *StaticTop, bool NeedDynamicTop) {
+ if (StackRestorePoints.empty())
+ return nullptr;
+
+ IRBuilder<> IRB(StaticTop
+ ? cast<Instruction>(StaticTop)->getNextNode()
+ : (Instruction *)F.getEntryBlock().getFirstInsertionPt());
+
+ // We need the current value of the shadow stack pointer to restore
+ // after longjmp or exception catching.
+
+ // FIXME: On some platforms this could be handled by the longjmp/exception
+ // runtime itself.
+
+ AllocaInst *DynamicTop = nullptr;
+ if (NeedDynamicTop)
+ // If we also have dynamic allocas, the stack pointer value changes
+ // throughout the function. For now we store it in an alloca.
+ DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
+ "unsafe_stack_dynamic_ptr");
+
+ if (!StaticTop)
+ // We need the original unsafe stack pointer value, even if there are
+ // no unsafe static allocas.
+ StaticTop = IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr");
+
+ if (NeedDynamicTop)
+ IRB.CreateStore(StaticTop, DynamicTop);
+
+ // Restore current stack pointer after longjmp/exception catch.
+ for (Instruction *I : StackRestorePoints) {
+ ++NumUnsafeStackRestorePoints;
+
+ IRB.SetInsertPoint(cast<Instruction>(I->getNextNode()));
+ Value *CurrentTop = DynamicTop ? IRB.CreateLoad(DynamicTop) : StaticTop;
+ IRB.CreateStore(CurrentTop, UnsafeStackPtr);
+ }
+
+ return DynamicTop;
+}
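A hypothetical source pattern that yields such a restore point:

#include <csetjmp>
static jmp_buf Env;
static void mayJump(bool Jump) {
  if (Jump) longjmp(Env, 1); // unwinds the safe stack only
}
void demo() {
  char big[128] = {0};  // assume this lands on the unsafe stack
  if (setjmp(Env) == 0) // returns-twice call site -> a restore point
    mayJump(true);
  // The pass re-stores the saved unsafe stack pointer here, because
  // longjmp knows nothing about it.
  (void)big;
}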
+
+Value *
+SafeStack::moveStaticAllocasToUnsafeStack(Function &F,
+ ArrayRef<AllocaInst *> StaticAllocas,
+ ArrayRef<ReturnInst *> Returns) {
+ if (StaticAllocas.empty())
+ return nullptr;
+
+ IRBuilder<> IRB(F.getEntryBlock().getFirstInsertionPt());
+ DIBuilder DIB(*F.getParent());
+
+ // We explicitly compute and set the unsafe stack layout for all unsafe
+ // static alloca instructions. We save the unsafe "base pointer" in the
+ // prologue into a local variable and restore it in the epilogue.
+
+ // Load the current stack pointer (we'll also use it as a base pointer).
+ // FIXME: use a dedicated register for it?
+ Instruction *BasePointer =
+ IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr");
+ assert(BasePointer->getType() == StackPtrTy);
+
+ for (ReturnInst *RI : Returns) {
+ IRB.SetInsertPoint(RI);
+ IRB.CreateStore(BasePointer, UnsafeStackPtr);
+ }
+
+ // Compute maximum alignment among static objects on the unsafe stack.
+ unsigned MaxAlignment = 0;
+ for (AllocaInst *AI : StaticAllocas) {
+ Type *Ty = AI->getAllocatedType();
+ unsigned Align =
+ std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment());
+ if (Align > MaxAlignment)
+ MaxAlignment = Align;
+ }
+
+ if (MaxAlignment > StackAlignment) {
+ // Re-align the base pointer according to the max requested alignment.
+ assert(isPowerOf2_32(MaxAlignment));
+ IRB.SetInsertPoint(cast<Instruction>(BasePointer->getNextNode()));
+ BasePointer = cast<Instruction>(IRB.CreateIntToPtr(
+ IRB.CreateAnd(IRB.CreatePtrToInt(BasePointer, IntPtrTy),
+ ConstantInt::get(IntPtrTy, ~uint64_t(MaxAlignment - 1))),
+ StackPtrTy));
+ }
+
+ // Allocate space for every unsafe static AllocaInst on the unsafe stack.
+ int64_t StaticOffset = 0; // Current stack top.
+ for (AllocaInst *AI : StaticAllocas) {
+ IRB.SetInsertPoint(AI);
+
+ auto CArraySize = cast<ConstantInt>(AI->getArraySize());
+ Type *Ty = AI->getAllocatedType();
+
+ uint64_t Size = DL->getTypeAllocSize(Ty) * CArraySize->getZExtValue();
+ if (Size == 0)
+ Size = 1; // Don't create zero-sized stack objects.
+
+ // Ensure the object is properly aligned.
+ unsigned Align =
+ std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment());
+
+ // Add alignment.
+ // NOTE: we ensure that BasePointer itself is aligned to >= Align.
+ StaticOffset += Size;
+ StaticOffset = RoundUpToAlignment(StaticOffset, Align);
+
+ Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
+ ConstantInt::get(Int32Ty, -StaticOffset));
+ Value *NewAI = IRB.CreateBitCast(Off, AI->getType(), AI->getName());
+ if (AI->hasName() && isa<Instruction>(NewAI))
+ cast<Instruction>(NewAI)->takeName(AI);
+
+ // Replace alloc with the new location.
+ replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/true);
+ AI->replaceAllUsesWith(NewAI);
+ AI->eraseFromParent();
+ }
+
+ // Re-align BasePointer so that our callees would see it aligned as
+ // expected.
+ // FIXME: no need to update BasePointer in leaf functions.
+ StaticOffset = RoundUpToAlignment(StaticOffset, StackAlignment);
+
+ // Update shadow stack pointer in the function epilogue.
+ IRB.SetInsertPoint(cast<Instruction>(BasePointer->getNextNode()));
+
+ Value *StaticTop =
+ IRB.CreateGEP(BasePointer, ConstantInt::get(Int32Ty, -StaticOffset),
+ "unsafe_stack_static_top");
+ IRB.CreateStore(StaticTop, UnsafeStackPtr);
+ return StaticTop;
+}
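A worked instance of the layout loop above, assuming two hypothetical objects (12 bytes at align 4, then 8 bytes at align 8) and the fixed StackAlignment of 16:

#include <cstdint>
// RoundUpToAlignment reduced to its arithmetic (Align is a power of two).
static uint64_t roundUp(uint64_t X, uint64_t Align) {
  return (X + Align - 1) & ~(Align - 1);
}
// offset1 = roundUp(0 + 12, 4) = 12 -> object at BasePointer - 12
// offset2 = roundUp(12 + 8, 8) = 24 -> object at BasePointer - 24
// StaticTop = BasePointer - roundUp(24, 16) = BasePointer - 32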
+
+void SafeStack::moveDynamicAllocasToUnsafeStack(
+ Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
+ ArrayRef<AllocaInst *> DynamicAllocas) {
+ DIBuilder DIB(*F.getParent());
+
+ for (AllocaInst *AI : DynamicAllocas) {
+ IRBuilder<> IRB(AI);
+
+ // Compute the new SP value (after AI).
+ Value *ArraySize = AI->getArraySize();
+ if (ArraySize->getType() != IntPtrTy)
+ ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false);
+
+ Type *Ty = AI->getAllocatedType();
+ uint64_t TySize = DL->getTypeAllocSize(Ty);
+ Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));
+
+ Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(UnsafeStackPtr), IntPtrTy);
+ SP = IRB.CreateSub(SP, Size);
+
+ // Align the SP value to satisfy the AllocaInst, type and stack alignments.
+ unsigned Align = std::max(
+ std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment()),
+ (unsigned)StackAlignment);
+
+ assert(isPowerOf2_32(Align));
+ Value *NewTop = IRB.CreateIntToPtr(
+ IRB.CreateAnd(SP, ConstantInt::get(IntPtrTy, ~uint64_t(Align - 1))),
+ StackPtrTy);
+
+ // Save the stack pointer.
+ IRB.CreateStore(NewTop, UnsafeStackPtr);
+ if (DynamicTop)
+ IRB.CreateStore(NewTop, DynamicTop);
+
+ Value *NewAI = IRB.CreateIntToPtr(SP, AI->getType());
+ if (AI->hasName() && isa<Instruction>(NewAI))
+ NewAI->takeName(AI);
+
+ replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/true);
+ AI->replaceAllUsesWith(NewAI);
+ AI->eraseFromParent();
+ }
+
+ if (!DynamicAllocas.empty()) {
+ // Now go through the instructions again, replacing stacksave/stackrestore.
+ for (inst_iterator It = inst_begin(&F), Ie = inst_end(&F); It != Ie;) {
+ Instruction *I = &*(It++);
+ auto II = dyn_cast<IntrinsicInst>(I);
+ if (!II)
+ continue;
+
+ if (II->getIntrinsicID() == Intrinsic::stacksave) {
+ IRBuilder<> IRB(II);
+ Instruction *LI = IRB.CreateLoad(UnsafeStackPtr);
+ LI->takeName(II);
+ II->replaceAllUsesWith(LI);
+ II->eraseFromParent();
+ } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
+ IRBuilder<> IRB(II);
+ Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
+ SI->takeName(II);
+ assert(II->use_empty());
+ II->eraseFromParent();
+ }
+ }
+ }
+}
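The rounding-down step used for each dynamic alloca, isolated (Align is a power of two, as the assert enforces):

#include <cstdint>
static uint64_t alignDown(uint64_t SP, uint64_t Align) {
  return SP & ~(Align - 1); // clear low bits: SP rounded down to Align
}
// e.g. alignDown(0x1003F, 16) == 0x10030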
+
+bool SafeStack::runOnFunction(Function &F) {
+ auto AA = &getAnalysis<AliasAnalysis>();
+
+ DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
+
+ if (!F.hasFnAttribute(Attribute::SafeStack)) {
+ DEBUG(dbgs() << "[SafeStack] safestack is not requested"
+ " for this function\n");
+ return false;
+ }
+
+ if (F.isDeclaration()) {
+ DEBUG(dbgs() << "[SafeStack] function definition"
+ " is not available\n");
+ return false;
+ }
+
+ {
+ // Make sure the regular stack protector won't run on this function
+ // (safestack attribute takes precedence).
+ AttrBuilder B;
+ B.addAttribute(Attribute::StackProtect)
+ .addAttribute(Attribute::StackProtectReq)
+ .addAttribute(Attribute::StackProtectStrong);
+ F.removeAttributes(
+ AttributeSet::FunctionIndex,
+ AttributeSet::get(F.getContext(), AttributeSet::FunctionIndex, B));
+ }
+
+ if (AA->onlyReadsMemory(&F)) {
+ // XXX: we don't protect against information leak attacks for now.
+ DEBUG(dbgs() << "[SafeStack] function only reads memory\n");
+ return false;
+ }
+
+ ++NumFunctions;
+
+ SmallVector<AllocaInst *, 16> StaticAllocas;
+ SmallVector<AllocaInst *, 4> DynamicAllocas;
+ SmallVector<ReturnInst *, 4> Returns;
+
+ // Collect all points where stack gets unwound and needs to be restored.
+ // This is only necessary because the runtime (setjmp and unwind code) is
+ // not aware of the unsafe stack and won't unwind/restore it properly.
+ // To work around this problem without changing the runtime, we insert
+ // instrumentation to restore the unsafe stack pointer when necessary.
+ SmallVector<Instruction *, 4> StackRestorePoints;
+
+ // Find all static and dynamic alloca instructions that must be moved to the
+ // unsafe stack, all return instructions and stack restore points.
+ findInsts(F, StaticAllocas, DynamicAllocas, Returns, StackRestorePoints);
+
+ if (StaticAllocas.empty() && DynamicAllocas.empty() &&
+ StackRestorePoints.empty())
+ return false; // Nothing to do in this function.
+
+ if (!StaticAllocas.empty() || !DynamicAllocas.empty())
+ ++NumUnsafeStackFunctions; // This function has the unsafe stack.
+
+ if (!StackRestorePoints.empty())
+ ++NumUnsafeStackRestorePointsFunctions;
+
+ // The top of the unsafe stack after all unsafe static allocas are allocated.
+ Value *StaticTop = moveStaticAllocasToUnsafeStack(F, StaticAllocas, Returns);
+
+ // Safe stack object that stores the current unsafe stack top. It is updated
+ // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
+ // This is only needed if we need to restore stack pointer after longjmp
+ // or exceptions, and we have dynamic allocations.
+ // FIXME: a better alternative might be to store the unsafe stack pointer
+ // before setjmp / invoke instructions.
+ AllocaInst *DynamicTop = createStackRestorePoints(
+ F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());
+
+ // Handle dynamic allocas.
+ moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
+ DynamicAllocas);
+
+ DEBUG(dbgs() << "[SafeStack] safestack applied\n");
+ return true;
+}
+
+} // end anonymous namespace
+
+char SafeStack::ID = 0;
+INITIALIZE_PASS_BEGIN(SafeStack, "safe-stack",
+ "Safe Stack instrumentation pass", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_END(SafeStack, "safe-stack", "Safe Stack instrumentation pass",
+ false, false)
+
+FunctionPass *llvm::createSafeStackPass() { return new SafeStack(); }
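Hypothetical driver-side use of the new factory (legacy pass manager; only functions carrying the safestack attribute get instrumented):

#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Transforms/Instrumentation.h"
void addSafeStack(llvm::legacy::PassManagerBase &PM) {
  PM.add(llvm::createSafeStackPass());
}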
diff --git a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index f6ae0c2dd5f9..dff39efa5b96 100644
--- a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -33,6 +33,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
@@ -385,9 +386,14 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
}
bool IsEntryBB = &BB == &F.getEntryBlock();
- DebugLoc EntryLoc = IsEntryBB && IP->getDebugLoc()
- ? IP->getDebugLoc().getFnDebugLoc()
- : IP->getDebugLoc();
+ DebugLoc EntryLoc;
+ if (IsEntryBB) {
+ if (auto SP = getDISubprogram(&F))
+ EntryLoc = DebugLoc::get(SP->getScopeLine(), 0, SP);
+ } else {
+ EntryLoc = IP->getDebugLoc();
+ }
+
IRBuilder<> IRB(IP);
IRB.SetCurrentDebugLocation(EntryLoc);
SmallVector<Value *, 1> Indices;
diff --git a/lib/Transforms/ObjCARC/BlotMapVector.h b/lib/Transforms/ObjCARC/BlotMapVector.h
index d6439b698418..f9fde262b657 100644
--- a/lib/Transforms/ObjCARC/BlotMapVector.h
+++ b/lib/Transforms/ObjCARC/BlotMapVector.h
@@ -105,4 +105,4 @@ public:
return Map.empty();
}
};
-} //
+} // namespace llvm
diff --git a/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp b/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
index d318643a359a..c7c77eca5af4 100644
--- a/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
@@ -50,7 +50,7 @@ namespace {
initializeObjCARCAPElimPass(*PassRegistry::getPassRegistry());
}
};
-}
+} // namespace
char ObjCARCAPElim::ID = 0;
INITIALIZE_PASS(ObjCARCAPElim,
diff --git a/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp b/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
index b1515e386207..94b092cc2aa3 100644
--- a/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.cpp
@@ -58,7 +58,8 @@ ObjCARCAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
}
AliasAnalysis::AliasResult
-ObjCARCAliasAnalysis::alias(const Location &LocA, const Location &LocB) {
+ObjCARCAliasAnalysis::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
if (!EnableARCOpts)
return AliasAnalysis::alias(LocA, LocB);
@@ -67,8 +68,8 @@ ObjCARCAliasAnalysis::alias(const Location &LocA, const Location &LocB) {
const Value *SA = GetRCIdentityRoot(LocA.Ptr);
const Value *SB = GetRCIdentityRoot(LocB.Ptr);
AliasResult Result =
- AliasAnalysis::alias(Location(SA, LocA.Size, LocA.AATags),
- Location(SB, LocB.Size, LocB.AATags));
+ AliasAnalysis::alias(MemoryLocation(SA, LocA.Size, LocA.AATags),
+ MemoryLocation(SB, LocB.Size, LocB.AATags));
if (Result != MayAlias)
return Result;
@@ -77,7 +78,7 @@ ObjCARCAliasAnalysis::alias(const Location &LocA, const Location &LocB) {
const Value *UA = GetUnderlyingObjCPtr(SA, *DL);
const Value *UB = GetUnderlyingObjCPtr(SB, *DL);
if (UA != SA || UB != SB) {
- Result = AliasAnalysis::alias(Location(UA), Location(UB));
+ Result = AliasAnalysis::alias(MemoryLocation(UA), MemoryLocation(UB));
// We can't use MustAlias or PartialAlias results here because
// GetUnderlyingObjCPtr may return an offsetted pointer value.
if (Result == NoAlias)
@@ -89,24 +90,23 @@ ObjCARCAliasAnalysis::alias(const Location &LocA, const Location &LocB) {
return MayAlias;
}
-bool
-ObjCARCAliasAnalysis::pointsToConstantMemory(const Location &Loc,
- bool OrLocal) {
+bool ObjCARCAliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) {
if (!EnableARCOpts)
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
// First, strip off no-ops, including ObjC-specific no-ops, and try making
// a precise alias query.
const Value *S = GetRCIdentityRoot(Loc.Ptr);
- if (AliasAnalysis::pointsToConstantMemory(Location(S, Loc.Size, Loc.AATags),
- OrLocal))
+ if (AliasAnalysis::pointsToConstantMemory(
+ MemoryLocation(S, Loc.Size, Loc.AATags), OrLocal))
return true;
// If that failed, climb to the underlying object, including climbing through
// ObjC-specific no-ops, and try making an imprecise alias query.
const Value *U = GetUnderlyingObjCPtr(S, *DL);
if (U != S)
- return AliasAnalysis::pointsToConstantMemory(Location(U), OrLocal);
+ return AliasAnalysis::pointsToConstantMemory(MemoryLocation(U), OrLocal);
// If that failed, fail. We don't need to chain here, since that's covered
// by the earlier precise query.
@@ -135,7 +135,8 @@ ObjCARCAliasAnalysis::getModRefBehavior(const Function *F) {
}
AliasAnalysis::ModRefResult
-ObjCARCAliasAnalysis::getModRefInfo(ImmutableCallSite CS, const Location &Loc) {
+ObjCARCAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+ const MemoryLocation &Loc) {
if (!EnableARCOpts)
return AliasAnalysis::getModRefInfo(CS, Loc);
diff --git a/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h b/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h
index 3c5a021de267..eecc82fe572c 100644
--- a/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h
+++ b/lib/Transforms/ObjCARC/ObjCARCAliasAnalysis.h
@@ -56,12 +56,14 @@ namespace objcarc {
}
void getAnalysisUsage(AnalysisUsage &AU) const override;
- AliasResult alias(const Location &LocA, const Location &LocB) override;
- bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override;
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override;
ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
ModRefBehavior getModRefBehavior(const Function *F) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) override;
+ const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override;
};
diff --git a/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index e7731ad5cd17..080dbc0cdc2d 100644
--- a/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -101,7 +101,7 @@ namespace {
initializeObjCARCContractPass(*PassRegistry::getPassRegistry());
}
};
-}
+} // namespace
//===----------------------------------------------------------------------===//
// Implementation
@@ -200,7 +200,7 @@ static StoreInst *findSafeStoreForStoreStrongContraction(LoadInst *Load,
bool SawRelease = false;
// Get the location associated with Load.
- AliasAnalysis::Location Loc = MemoryLocation::get(Load);
+ MemoryLocation Loc = MemoryLocation::get(Load);
// Walk down to find the store and the release, which may be in either order.
for (auto I = std::next(BasicBlock::iterator(Load)),
@@ -212,7 +212,7 @@ static StoreInst *findSafeStoreForStoreStrongContraction(LoadInst *Load,
break;
// Now we know that we have not seen either the store or the release. If I
- // is the the release, mark that we saw the release and continue.
+ // is the release, mark that we saw the release and continue.
Instruction *Inst = &*I;
if (Inst == Release) {
SawRelease = true;
diff --git a/lib/Transforms/ObjCARC/ObjCARCExpand.cpp b/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
index 53c19c39f97f..4f2f7da7a88e 100644
--- a/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
@@ -63,7 +63,7 @@ namespace {
initializeObjCARCExpandPass(*PassRegistry::getPassRegistry());
}
};
-}
+} // namespace
char ObjCARCExpand::ID = 0;
INITIALIZE_PASS(ObjCARCExpand,
diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index dca3f1b03fbb..cdbbfac4813b 100644
--- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -313,7 +313,7 @@ namespace {
};
const unsigned BBState::OverflowOccurredValue = 0xffffffff;
-}
+} // namespace
namespace llvm {
raw_ostream &operator<<(raw_ostream &OS,
@@ -551,7 +551,7 @@ namespace {
initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
}
};
-}
+} // namespace
char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
diff --git a/lib/Transforms/Scalar/ADCE.cpp b/lib/Transforms/Scalar/ADCE.cpp
index d6fc91641588..fe0224bb56c7 100644
--- a/lib/Transforms/Scalar/ADCE.cpp
+++ b/lib/Transforms/Scalar/ADCE.cpp
@@ -44,7 +44,7 @@ struct ADCE : public FunctionPass {
AU.setPreservesCFG();
}
};
-}
+} // namespace
char ADCE::ID = 0;
INITIALIZE_PASS(ADCE, "adce", "Aggressive Dead Code Elimination", false, false)
diff --git a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index 8918909f484a..a4e5446a2b12 100644
--- a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -76,7 +76,7 @@ struct AlignmentFromAssumptions : public FunctionPass {
const SCEV *&OffSCEV);
bool processAssumption(CallInst *I);
};
-}
+} // namespace
char AlignmentFromAssumptions::ID = 0;
static const char aip_name[] = "Alignment from assumptions";
diff --git a/lib/Transforms/Scalar/BDCE.cpp b/lib/Transforms/Scalar/BDCE.cpp
index 09c605e76737..8ffbacddda68 100644
--- a/lib/Transforms/Scalar/BDCE.cpp
+++ b/lib/Transforms/Scalar/BDCE.cpp
@@ -66,7 +66,7 @@ struct BDCE : public FunctionPass {
AssumptionCache *AC;
DominatorTree *DT;
};
-}
+} // namespace
char BDCE::ID = 0;
INITIALIZE_PASS_BEGIN(BDCE, "bdce", "Bit-Tracking Dead Code Elimination",
diff --git a/lib/Transforms/Scalar/ConstantHoisting.cpp b/lib/Transforms/Scalar/ConstantHoisting.cpp
index 4288742dd3eb..cc1dc9435a05 100644
--- a/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -171,7 +171,7 @@ private:
void deleteDeadCastInst() const;
bool optimizeConstants(Function &Fn);
};
-}
+} // namespace
char ConstantHoisting::ID = 0;
INITIALIZE_PASS_BEGIN(ConstantHoisting, "consthoist", "Constant Hoisting",
diff --git a/lib/Transforms/Scalar/ConstantProp.cpp b/lib/Transforms/Scalar/ConstantProp.cpp
index c974ebb9456f..e3df86ecf169 100644
--- a/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/lib/Transforms/Scalar/ConstantProp.cpp
@@ -47,7 +47,7 @@ namespace {
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
};
-}
+} // namespace
char ConstantPropagation::ID = 0;
INITIALIZE_PASS_BEGIN(ConstantPropagation, "constprop",
diff --git a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 79624b2e4c47..b1809b7fae08 100644
--- a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -56,7 +56,7 @@ namespace {
AU.addRequired<LazyValueInfo>();
}
};
-}
+} // namespace
char CorrelatedValuePropagation::ID = 0;
INITIALIZE_PASS_BEGIN(CorrelatedValuePropagation, "correlated-propagation",
diff --git a/lib/Transforms/Scalar/DCE.cpp b/lib/Transforms/Scalar/DCE.cpp
index 3b262a23091f..aa628e5aca81 100644
--- a/lib/Transforms/Scalar/DCE.cpp
+++ b/lib/Transforms/Scalar/DCE.cpp
@@ -60,7 +60,7 @@ namespace {
AU.setPreservesCFG();
}
};
-}
+} // namespace
char DeadInstElimination::ID = 0;
INITIALIZE_PASS(DeadInstElimination, "die",
@@ -87,7 +87,7 @@ namespace {
AU.setPreservesCFG();
}
};
-}
+} // namespace
char DCE::ID = 0;
INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false)
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index eb48a766a2cf..c99dc5fc8445 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -78,7 +78,7 @@ namespace {
bool runOnBasicBlock(BasicBlock &BB);
bool HandleFree(CallInst *F);
bool handleEndBlock(BasicBlock &BB);
- void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
+ void RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
SmallSetVector<Value *, 16> &DeadStackObjects,
const DataLayout &DL);
@@ -92,7 +92,7 @@ namespace {
AU.addPreserved<MemoryDependenceAnalysis>();
}
};
-}
+} // namespace
char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
@@ -194,37 +194,37 @@ static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
/// getLocForWrite - Return a Location stored to by the specified instruction.
/// If isRemovable returns true, this function and getLocForRead completely
/// describe the memory operations for this instruction.
-static AliasAnalysis::Location
-getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
+static MemoryLocation getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
return MemoryLocation::get(SI);
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
// memcpy/memmove/memset.
- AliasAnalysis::Location Loc = MemoryLocation::getForDest(MI);
+ MemoryLocation Loc = MemoryLocation::getForDest(MI);
return Loc;
}
IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
- if (!II) return AliasAnalysis::Location();
+ if (!II)
+ return MemoryLocation();
switch (II->getIntrinsicID()) {
- default: return AliasAnalysis::Location(); // Unhandled intrinsic.
+ default:
+ return MemoryLocation(); // Unhandled intrinsic.
case Intrinsic::init_trampoline:
// FIXME: We don't know the size of the trampoline, so we can't really
// handle it here.
- return AliasAnalysis::Location(II->getArgOperand(0));
+ return MemoryLocation(II->getArgOperand(0));
case Intrinsic::lifetime_end: {
uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
- return AliasAnalysis::Location(II->getArgOperand(1), Len);
+ return MemoryLocation(II->getArgOperand(1), Len);
}
}
}
/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
-static AliasAnalysis::Location
-getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
+static MemoryLocation getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
"Unknown instruction case");
@@ -232,7 +232,7 @@ getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
// instructions (memcpy/memmove).
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
return MemoryLocation::getForSource(MTI);
- return AliasAnalysis::Location();
+ return MemoryLocation();
}
@@ -317,7 +317,7 @@ static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
uint64_t Size;
if (getObjectSize(V, Size, DL, TLI))
return Size;
- return AliasAnalysis::UnknownSize;
+ return MemoryLocation::UnknownSize;
}
namespace {
@@ -333,8 +333,8 @@ namespace {
/// completely overwrites a store to the 'Earlier' location.
/// 'OverwriteEnd' if the end of the 'Earlier' location is completely
/// overwritten by 'Later', or 'OverwriteUnknown' if nothing can be determined
-static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
- const AliasAnalysis::Location &Earlier,
+static OverwriteResult isOverwrite(const MemoryLocation &Later,
+ const MemoryLocation &Earlier,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
int64_t &EarlierOff, int64_t &LaterOff) {
@@ -346,8 +346,8 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
if (P1 == P2) {
// If we don't know the sizes of either access, then we can't do a
// comparison.
- if (Later.Size == AliasAnalysis::UnknownSize ||
- Earlier.Size == AliasAnalysis::UnknownSize)
+ if (Later.Size == MemoryLocation::UnknownSize ||
+ Earlier.Size == MemoryLocation::UnknownSize)
return OverwriteUnknown;
// Make sure that the Later size is >= the Earlier size.
@@ -357,8 +357,8 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// Otherwise, we have to have size information, and the later store has to be
// larger than the earlier one.
- if (Later.Size == AliasAnalysis::UnknownSize ||
- Earlier.Size == AliasAnalysis::UnknownSize)
+ if (Later.Size == MemoryLocation::UnknownSize ||
+ Earlier.Size == MemoryLocation::UnknownSize)
return OverwriteUnknown;
// Check to see if the later store is to the entire object (either a global,
@@ -374,7 +374,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// If the "Later" store is to a recognizable object, get its size.
uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
- if (ObjectSize != AliasAnalysis::UnknownSize)
+ if (ObjectSize != MemoryLocation::UnknownSize)
if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
return OverwriteComplete;
@@ -441,11 +441,11 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
- const AliasAnalysis::Location &InstStoreLoc,
+ const MemoryLocation &InstStoreLoc,
Instruction *DepWrite, AliasAnalysis &AA) {
// Self reads can only happen for instructions that read memory. Get the
// location read.
- AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
+ MemoryLocation InstReadLoc = getLocForRead(Inst, AA);
if (!InstReadLoc.Ptr) return false; // Not a reading instruction.
// If the read and written loc obviously don't alias, it isn't a read.
@@ -459,7 +459,7 @@ static bool isPossibleSelfRead(Instruction *Inst,
// Here we don't know if A/B may alias, but we do know that B/B are must
// aliases, so removing the first memcpy is safe (assuming it writes <= #
// bytes as the second one).
- AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);
+ MemoryLocation DepReadLoc = getLocForRead(DepWrite, AA);
if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
return false;
@@ -525,7 +525,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
}
// Figure out what location is being stored to.
- AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);
+ MemoryLocation Loc = getLocForWrite(Inst, *AA);
// If we didn't get a useful location, fail.
if (!Loc.Ptr)
@@ -540,7 +540,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
//
// Find out what memory location the dependent instruction stores.
Instruction *DepWrite = InstDep.getInst();
- AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
+ MemoryLocation DepLoc = getLocForWrite(DepWrite, *AA);
// If we didn't get a useful location, or if it isn't a size, bail out.
if (!DepLoc.Ptr)
break;
@@ -645,7 +645,7 @@ static void FindUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
bool DSE::HandleFree(CallInst *F) {
bool MadeChange = false;
- AliasAnalysis::Location Loc = AliasAnalysis::Location(F->getOperand(0));
+ MemoryLocation Loc = MemoryLocation(F->getOperand(0));
SmallVector<BasicBlock *, 16> Blocks;
Blocks.push_back(F->getParent());
const DataLayout &DL = F->getModule()->getDataLayout();
@@ -809,7 +809,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
continue;
}
- AliasAnalysis::Location LoadedLoc;
+ MemoryLocation LoadedLoc;
// If we encounter a use of the pointer, it is no longer considered dead
if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
@@ -845,7 +845,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set. If so, they become live
/// because the location is being loaded.
-void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
+void DSE::RemoveAccessedObjects(const MemoryLocation &LoadedLoc,
SmallSetVector<Value *, 16> &DeadStackObjects,
const DataLayout &DL) {
const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);
@@ -864,8 +864,8 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
// Remove objects that could alias LoadedLoc.
DeadStackObjects.remove_if([&](Value *I) {
// See if the loaded location could alias the stack location.
- AliasAnalysis::Location StackLoc(
- I, getPointerSize(I, DL, AA->getTargetLibraryInfo()));
+ MemoryLocation StackLoc(I,
+ getPointerSize(I, DL, AA->getTargetLibraryInfo()));
return !AA->isNoAlias(StackLoc, LoadedLoc);
});
}
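
For context, the DSE hunks above are a mechanical migration from AliasAnalysis::Location to the new MemoryLocation class. A minimal standalone sketch of the resulting query pattern, assuming the LLVM 3.7-era headers named below (mayAliasStackObject is a hypothetical helper, not part of this patch):

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
using namespace llvm;

// Hypothetical helper: does a stack object of (possibly unknown) size
// alias a location that is about to be loaded?
static bool mayAliasStackObject(AliasAnalysis &AA, const Value *Obj,
                                uint64_t ObjSize,
                                const MemoryLocation &LoadedLoc) {
  // MemoryLocation::UnknownSize replaces AliasAnalysis::UnknownSize.
  MemoryLocation StackLoc(
      Obj, ObjSize ? ObjSize : MemoryLocation::UnknownSize);
  return !AA.isNoAlias(StackLoc, LoadedLoc);
}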
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index d536a937dce1..8b629eaca9d4 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -72,7 +72,7 @@ struct SimpleValue {
isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
}
};
-}
+} // namespace
namespace llvm {
template <> struct DenseMapInfo<SimpleValue> {
@@ -85,7 +85,7 @@ template <> struct DenseMapInfo<SimpleValue> {
static unsigned getHashValue(SimpleValue Val);
static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};
-}
+} // namespace llvm
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
Instruction *Inst = Val.Inst;
@@ -219,7 +219,7 @@ struct CallValue {
return true;
}
};
-}
+} // namespace
namespace llvm {
template <> struct DenseMapInfo<CallValue> {
@@ -232,7 +232,7 @@ template <> struct DenseMapInfo<CallValue> {
static unsigned getHashValue(CallValue Val);
static bool isEqual(CallValue LHS, CallValue RHS);
};
-}
+} // namespace llvm
unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
Instruction *Inst = Val.Inst;
@@ -447,7 +447,7 @@ private:
ExpectedType);
}
};
-}
+} // namespace
bool EarlyCSE::processNode(DomTreeNode *Node) {
BasicBlock *BB = Node->getBlock();
@@ -764,7 +764,7 @@ public:
AU.setPreservesCFG();
}
};
-}
+} // namespace
char EarlyCSELegacyPass::ID = 0;
diff --git a/lib/Transforms/Scalar/FlattenCFGPass.cpp b/lib/Transforms/Scalar/FlattenCFGPass.cpp
index 0430c1898c8d..dd6ea8d455c5 100644
--- a/lib/Transforms/Scalar/FlattenCFGPass.cpp
+++ b/lib/Transforms/Scalar/FlattenCFGPass.cpp
@@ -36,7 +36,7 @@ public:
private:
AliasAnalysis *AA;
};
-}
+} // namespace
char FlattenCFGPass::ID = 0;
INITIALIZE_PASS_BEGIN(FlattenCFGPass, "flattencfg", "Flatten the CFG", false,
diff --git a/lib/Transforms/Scalar/Float2Int.cpp b/lib/Transforms/Scalar/Float2Int.cpp
index c9314229c38b..bb90c5f73239 100644
--- a/lib/Transforms/Scalar/Float2Int.cpp
+++ b/lib/Transforms/Scalar/Float2Int.cpp
@@ -79,7 +79,7 @@ namespace {
MapVector<Instruction*, Value*> ConvertedInsts;
LLVMContext *Ctx;
};
-}
+} // namespace
char Float2Int::ID = 0;
INITIALIZE_PASS(Float2Int, "float2int", "Float to int", false, false)
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 7770ddcb9d7a..d9308c4e3710 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -138,7 +138,7 @@ namespace {
uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
void verifyRemoved(const Value *) const;
};
-}
+} // namespace
namespace llvm {
template <> struct DenseMapInfo<Expression> {
@@ -159,7 +159,7 @@ template <> struct DenseMapInfo<Expression> {
}
};
-}
+} // namespace llvm
//===----------------------------------------------------------------------===//
// ValueTable Internal Functions
@@ -723,7 +723,7 @@ namespace {
};
char GVN::ID = 0;
-}
+} // namespace
// The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
@@ -852,13 +852,12 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
/// If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
-/// the stored value. LoadedTy is the type of the load we want to replace and
-/// InsertPt is the place to insert new instructions.
+/// the stored value. LoadedTy is the type of the load we want to replace.
+/// the stored value. IRB is the IRBuilder used to insert new instructions.
///
/// If we can't do it, return null.
-static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
- Type *LoadedTy,
- Instruction *InsertPt,
+static Value *CoerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
+ IRBuilder<> &IRB,
const DataLayout &DL) {
if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL))
return nullptr;
@@ -874,12 +873,12 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// Pointer to Pointer -> use bitcast.
if (StoredValTy->getScalarType()->isPointerTy() &&
LoadedTy->getScalarType()->isPointerTy())
- return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
+ return IRB.CreateBitCast(StoredVal, LoadedTy);
// Convert source pointers to integers, which can be bitcast.
if (StoredValTy->getScalarType()->isPointerTy()) {
StoredValTy = DL.getIntPtrType(StoredValTy);
- StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
+ StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy);
}
Type *TypeToCastTo = LoadedTy;
@@ -887,11 +886,11 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
TypeToCastTo = DL.getIntPtrType(TypeToCastTo);
if (StoredValTy != TypeToCastTo)
- StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
+ StoredVal = IRB.CreateBitCast(StoredVal, TypeToCastTo);
// Cast to pointer if the load needs a pointer type.
if (LoadedTy->getScalarType()->isPointerTy())
- StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);
+ StoredVal = IRB.CreateIntToPtr(StoredVal, LoadedTy);
return StoredVal;
}
@@ -904,35 +903,34 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// Convert source pointers to integers, which can be manipulated.
if (StoredValTy->getScalarType()->isPointerTy()) {
StoredValTy = DL.getIntPtrType(StoredValTy);
- StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
+ StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy);
}
// Convert vectors and fp to integer, which can be manipulated.
if (!StoredValTy->isIntegerTy()) {
StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
- StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
+ StoredVal = IRB.CreateBitCast(StoredVal, StoredValTy);
}
// If this is a big-endian system, we need to shift the value down to the low
// bits so that a truncate will work.
if (DL.isBigEndian()) {
- Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
- StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
+ StoredVal = IRB.CreateLShr(StoredVal, StoreSize - LoadSize, "tmp");
}
// Truncate the integer to the right size now.
Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
- StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);
+ StoredVal = IRB.CreateTrunc(StoredVal, NewIntTy, "trunc");
if (LoadedTy == NewIntTy)
return StoredVal;
// If the result is a pointer, inttoptr.
if (LoadedTy->getScalarType()->isPointerTy())
- return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);
+ return IRB.CreateIntToPtr(StoredVal, LoadedTy, "inttoptr");
// Otherwise, bitcast.
- return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
+ return IRB.CreateBitCast(StoredVal, LoadedTy, "bitcast");
}
/// This function is called when we have a
@@ -1122,7 +1120,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;
- IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
+ IRBuilder<> Builder(InsertPt);
// Compute which bits of the stored value are being used by the load. Convert
// to an integer type to start with.
@@ -1145,7 +1143,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
if (LoadSize != StoreSize)
SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));
- return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, DL);
+ return CoerceAvailableValueToLoadType(SrcVal, LoadTy, Builder, DL);
}
/// This function is called when we have a
@@ -1219,7 +1217,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
LLVMContext &Ctx = LoadTy->getContext();
uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8;
- IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
+ IRBuilder<> Builder(InsertPt);
// We know that this method is only called when the mem transfer fully
// provides the bits for the load.
@@ -1248,7 +1246,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
++NumBytesSet;
}
- return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, DL);
+ return CoerceAvailableValueToLoadType(Val, LoadTy, Builder, DL);
}
// Otherwise, this is a memcpy/memmove from a constant global.
@@ -1695,6 +1693,8 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
LI->replaceAllUsesWith(V);
if (isa<PHINode>(V))
V->takeName(LI);
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ I->setDebugLoc(LI->getDebugLoc());
if (V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
@@ -1761,6 +1761,8 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
if (isa<PHINode>(V))
V->takeName(LI);
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ I->setDebugLoc(LI->getDebugLoc());
if (V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
markInstructionForDeletion(LI);
@@ -1928,8 +1930,9 @@ bool GVN::processLoad(LoadInst *L) {
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
if (StoredVal->getType() != L->getType()) {
+ IRBuilder<> Builder(L);
StoredVal =
- CoerceAvailableValueToLoadType(StoredVal, L->getType(), L, DL);
+ CoerceAvailableValueToLoadType(StoredVal, L->getType(), Builder, DL);
if (!StoredVal)
return false;
@@ -1953,7 +1956,9 @@ bool GVN::processLoad(LoadInst *L) {
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
if (DepLI->getType() != L->getType()) {
- AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L, DL);
+ IRBuilder<> Builder(L);
+ AvailableVal =
+ CoerceAvailableValueToLoadType(DepLI, L->getType(), Builder, DL);
if (!AvailableVal)
return false;
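
A note on the IRBuilder change in CoerceAvailableValueToLoadType: routing the casts through IRBuilder lets constant inputs fold instead of always materializing cast instructions. A minimal sketch of the idiom, assuming LLVM 3.7-era headers (bitcastBefore is a hypothetical helper, not part of this patch):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Hypothetical helper: emit a bitcast of V to Ty just before InsertPt.
// Unlike `new BitCastInst(V, Ty, "", InsertPt)`, the builder may return
// a folded constant, creating no instruction in that case.
static Value *bitcastBefore(Instruction *InsertPt, Value *V, Type *Ty) {
  IRBuilder<> IRB(InsertPt);
  return IRB.CreateBitCast(V, Ty);
}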
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index 359a616c069d..e931382ea98f 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -136,7 +136,7 @@ namespace {
void SinkUnusedInvariants(Loop *L);
};
-}
+} // namespace
char IndVarSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars",
@@ -494,7 +494,7 @@ struct RewritePhi {
RewritePhi(PHINode *P, unsigned I, Value *V, bool H, bool S)
: PN(P), Ith(I), Val(V), HighCost(H), SafePhi(S) {}
};
-}
+} // namespace
//===----------------------------------------------------------------------===//
// RewriteLoopExitValues - Optimize IV users outside the loop.
@@ -758,7 +758,7 @@ namespace {
WideIVInfo() : NarrowIV(nullptr), WidestNativeType(nullptr),
IsSigned(false) {}
};
-}
+} // namespace
/// visitCast - Update information about the induction variable that is
/// extended by this sign or zero extend operation. This is used to determine
@@ -1321,7 +1321,7 @@ namespace {
// Implement the interface used by simplifyUsersOfIV.
void visitCast(CastInst *Cast) override { visitIVCast(Cast, WI, SE, TTI); }
};
-}
+} // namespace
/// SimplifyAndExtend - Iteratively perform simplification on a worklist of IV
/// users. Each successive simplification may push more users which may
@@ -2013,10 +2013,11 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// Now that we're done iterating through lists, clean up any instructions
// which are now dead.
- while (!DeadInsts.empty())
- if (Instruction *Inst =
- dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
+ while (!DeadInsts.empty()) {
+ Value *V = static_cast<Value *>(DeadInsts.pop_back_val());
+ if (Instruction *Inst = dyn_cast_or_null<Instruction>(V))
RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
+ }
// The Rewriter may not be used from this point on.
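
The reworked drain loop above side-steps a subtle issue: DeadInsts holds weak value handles, so an entry may have become null (or a non-instruction) by the time it is popped. A sketch of the pattern, assuming LLVM 3.7-era headers (drainDeadInsts is a hypothetical helper):

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Hypothetical helper: pop each handle, re-check that it still refers to
// an instruction, then delete it along with any newly-dead operands.
template <typename WorklistT>
static void drainDeadInsts(WorklistT &DeadInsts,
                           const TargetLibraryInfo *TLI) {
  while (!DeadInsts.empty()) {
    Value *V = static_cast<Value *>(DeadInsts.pop_back_val());
    if (Instruction *Inst = dyn_cast_or_null<Instruction>(V))
      RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
  }
}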
diff --git a/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index cbdacad8f28b..ce1a0ca8c7d9 100644
--- a/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -222,7 +222,7 @@ public:
};
char InductiveRangeCheckElimination::ID = 0;
-}
+} // namespace
INITIALIZE_PASS(InductiveRangeCheckElimination, "irce",
"Inductive range check elimination", false, false)
@@ -618,7 +618,7 @@ public:
bool run();
};
-}
+} // namespace
void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
BasicBlock *ReplaceBy) {
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 711df417992b..7316db6ca02c 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -138,7 +138,7 @@ namespace {
bool SimplifyPartiallyRedundantLoad(LoadInst *LI);
bool TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB);
};
-}
+} // namespace
char JumpThreading::ID = 0;
INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
@@ -758,67 +758,33 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
- // For a comparison where the LHS is outside this block, it's possible
- // that we've branched on it before. Used LVI to see if we can simplify
- // the branch based on that.
+ // If we're branching on a conditional, LVI might be able to determine
+ // its value at the branch instruction. We only handle comparisons
+ // against a constant at this time.
+ // TODO: This should be extended to handle switches as well.
BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1));
- pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
- if (CondBr && CondConst && CondBr->isConditional() && PI != PE &&
- (!isa<Instruction>(CondCmp->getOperand(0)) ||
- cast<Instruction>(CondCmp->getOperand(0))->getParent() != BB)) {
- // For predecessor edge, determine if the comparison is true or false
- // on that edge. If they're all true or all false, we can simplify the
- // branch.
- // FIXME: We could handle mixed true/false by duplicating code.
- LazyValueInfo::Tristate Baseline =
- LVI->getPredicateOnEdge(CondCmp->getPredicate(), CondCmp->getOperand(0),
- CondConst, *PI, BB, CondCmp);
- if (Baseline != LazyValueInfo::Unknown) {
- // Check that all remaining incoming values match the first one.
- while (++PI != PE) {
- LazyValueInfo::Tristate Ret =
- LVI->getPredicateOnEdge(CondCmp->getPredicate(),
- CondCmp->getOperand(0), CondConst, *PI, BB,
- CondCmp);
- if (Ret != Baseline) break;
- }
-
- // If we terminated early, then one of the values didn't match.
- if (PI == PE) {
- unsigned ToRemove = Baseline == LazyValueInfo::True ? 1 : 0;
- unsigned ToKeep = Baseline == LazyValueInfo::True ? 0 : 1;
- CondBr->getSuccessor(ToRemove)->removePredecessor(BB, true);
- BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
- CondBr->eraseFromParent();
- if (CondCmp->use_empty())
- CondCmp->eraseFromParent();
- else if (CondCmp->getParent() == BB) {
- // If the fact we just learned is true for all uses of the
- // condition, replace it with a constant value
- auto *CI = Baseline == LazyValueInfo::True ?
- ConstantInt::getTrue(CondCmp->getType()) :
- ConstantInt::getFalse(CondCmp->getType());
- CondCmp->replaceAllUsesWith(CI);
- CondCmp->eraseFromParent();
- }
- return true;
- }
- }
-
- } else if (CondBr && CondConst && CondBr->isConditional()) {
- // There might be an invariant in the same block with the conditional
- // that can determine the predicate.
-
+ if (CondBr && CondConst && CondBr->isConditional()) {
LazyValueInfo::Tristate Ret =
LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0),
- CondConst, CondCmp);
+ CondConst, CondBr);
if (Ret != LazyValueInfo::Unknown) {
unsigned ToRemove = Ret == LazyValueInfo::True ? 1 : 0;
unsigned ToKeep = Ret == LazyValueInfo::True ? 0 : 1;
CondBr->getSuccessor(ToRemove)->removePredecessor(BB, true);
BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
CondBr->eraseFromParent();
+ if (CondCmp->use_empty())
+ CondCmp->eraseFromParent();
+ else if (CondCmp->getParent() == BB) {
+ // If the fact we just learned is true for all uses of the
+ // condition, replace it with a constant value.
+ auto *CI = Ret == LazyValueInfo::True ?
+ ConstantInt::getTrue(CondCmp->getType()) :
+ ConstantInt::getFalse(CondCmp->getType());
+ CondCmp->replaceAllUsesWith(CI);
+ CondCmp->eraseFromParent();
+ }
return true;
}
}
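
The JumpThreading rewrite above collapses the old per-edge scan into a single context-sensitive query. A minimal sketch of the query, assuming LLVM 3.7-era headers (foldableAtBranch is a hypothetical helper; the caller still rewires the CFG as in the hunk):

#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper: can LVI prove `Cmp(Op0, C)` true or false at the
// branch itself, covering all incoming paths at once?
static bool foldableAtBranch(LazyValueInfo &LVI, CmpInst *CondCmp,
                             Constant *CondConst, BranchInst *CondBr) {
  LazyValueInfo::Tristate Ret = LVI.getPredicateAt(
      CondCmp->getPredicate(), CondCmp->getOperand(0), CondConst, CondBr);
  return Ret != LazyValueInfo::Unknown;
}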
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index f0e6d641b180..e5019463bb5f 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -156,7 +156,7 @@ namespace {
/// Simple Analysis hook. Delete loop L from alias set map.
void deleteAnalysisLoop(Loop *L) override;
};
-}
+} // namespace
char LICM::ID = 0;
INITIALIZE_PASS_BEGIN(LICM, "licm", "Loop Invariant Code Motion", false, false)
@@ -777,7 +777,7 @@ namespace {
AST.deleteValue(I);
}
};
-} // end anon namespace
+} // namespace
/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop. We do this by looping over
diff --git a/lib/Transforms/Scalar/LoadCombine.cpp b/lib/Transforms/Scalar/LoadCombine.cpp
index c19cd19059b2..3dbf6ac6ed08 100644
--- a/lib/Transforms/Scalar/LoadCombine.cpp
+++ b/lib/Transforms/Scalar/LoadCombine.cpp
@@ -77,7 +77,7 @@ private:
bool aggregateLoads(SmallVectorImpl<LoadPOPPair> &);
bool combineLoads(SmallVectorImpl<LoadPOPPair> &);
};
-}
+} // namespace
bool LoadCombine::doInitialization(Function &F) {
DEBUG(dbgs() << "LoadCombine function: " << F.getName() << "\n");
diff --git a/lib/Transforms/Scalar/LoopDeletion.cpp b/lib/Transforms/Scalar/LoopDeletion.cpp
index 98b068edf582..02760ffe2c68 100644
--- a/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -57,7 +57,7 @@ namespace {
bool &Changed, BasicBlock *Preheader);
};
-}
+} // namespace
char LoopDeletion::ID = 0;
INITIALIZE_PASS_BEGIN(LoopDeletion, "loop-deletion",
diff --git a/lib/Transforms/Scalar/LoopDistribute.cpp b/lib/Transforms/Scalar/LoopDistribute.cpp
index a907d596e35b..d21a7db48c51 100644
--- a/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -630,26 +630,17 @@ private:
};
/// \brief Handles the loop versioning based on memchecks.
-class RuntimeCheckEmitter {
+class LoopVersioning {
public:
- RuntimeCheckEmitter(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
- DominatorTree *DT)
- : OrigLoop(L), NonDistributedLoop(nullptr), LAI(LAI), LI(LI), DT(DT) {}
-
- /// \brief Given the \p Partitions formed by Loop Distribution, it determines
- /// in which partition each pointer is used.
- void partitionPointers(InstPartitionContainer &Partitions) {
- // Set up partition id in PtrRtChecks. Ptr -> Access -> Intruction ->
- // Partition.
- PtrToPartition = Partitions.computePartitionSetForPointers(LAI);
-
- DEBUG(dbgs() << "\nPointers:\n");
- DEBUG(LAI.getRuntimePointerCheck()->print(dbgs(), 0, &PtrToPartition));
- }
+ LoopVersioning(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
+ DominatorTree *DT,
+ const SmallVector<int, 8> *PtrToPartition = nullptr)
+ : OrigLoop(L), NonDistributedLoop(nullptr),
+ PtrToPartition(PtrToPartition), LAI(LAI), LI(LI), DT(DT) {}
/// \brief Returns true if we need memchecks to distribute the loop.
bool needsRuntimeChecks() const {
- return LAI.getRuntimePointerCheck()->needsAnyChecking(&PtrToPartition);
+ return LAI.getRuntimePointerCheck()->needsAnyChecking(PtrToPartition);
}
/// \brief Performs the CFG manipulation part of versioning the loop including
@@ -660,7 +651,7 @@ public:
// Add the memcheck in the original preheader (this is empty initially).
BasicBlock *MemCheckBB = OrigLoop->getLoopPreheader();
std::tie(FirstCheckInst, MemRuntimeCheck) =
- LAI.addRuntimeCheck(MemCheckBB->getTerminator(), &PtrToPartition);
+ LAI.addRuntimeCheck(MemCheckBB->getTerminator(), PtrToPartition);
assert(MemRuntimeCheck && "called even though needsAnyChecking = false");
// Rename the block to make the IR more readable.
@@ -733,10 +724,11 @@ private:
Loop *NonDistributedLoop;
/// \brief For each memory pointer it contains the partitionId it is used in.
+ /// If nullptr, no partitioning is used.
///
/// The I-th entry corresponds to I-th entry in LAI.getRuntimePointerCheck().
/// If the pointer is used in multiple partitions the entry is set to -1.
- SmallVector<int, 8> PtrToPartition;
+ const SmallVector<int, 8> *PtrToPartition;
/// \brief This maps the instructions from OrigLoop to their counterpart in
/// NonDistributedLoop.
@@ -929,11 +921,13 @@ private:
// If we need run-time checks to disambiguate pointers at run-time, version
// the loop now.
- RuntimeCheckEmitter RtCheckEmitter(LAI, L, LI, DT);
- RtCheckEmitter.partitionPointers(Partitions);
- if (RtCheckEmitter.needsRuntimeChecks()) {
- RtCheckEmitter.versionLoop(this);
- RtCheckEmitter.addPHINodes(DefsUsedOutside);
+ auto PtrToPartition = Partitions.computePartitionSetForPointers(LAI);
+ LoopVersioning LVer(LAI, L, LI, DT, &PtrToPartition);
+ if (LVer.needsRuntimeChecks()) {
+ DEBUG(dbgs() << "\nPointers:\n");
+ DEBUG(LAI.getRuntimePointerCheck()->print(dbgs(), 0, &PtrToPartition));
+ LVer.versionLoop(this);
+ LVer.addPHINodes(DefsUsedOutside);
}
// Create identical copies of the original loop for each partition and hook
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index f92ecd4efdae..3de1333a7c98 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -209,7 +209,7 @@ namespace {
bool runOnNoncountableLoop();
bool runOnCountableLoop();
};
-}
+} // namespace
char LoopIdiomRecognize::ID = 0;
INITIALIZE_PASS_BEGIN(LoopIdiomRecognize, "loop-idiom", "Recognize loop idioms",
@@ -833,7 +833,7 @@ static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
// Get the location that may be stored across the loop. Since the access is
// strided positively through memory, we say that the modified location starts
// at the pointer and has infinite size.
- uint64_t AccessSize = AliasAnalysis::UnknownSize;
+ uint64_t AccessSize = MemoryLocation::UnknownSize;
// If the loop iterates a fixed number of times, we can refine the access size
// to be exactly the size of the memset, which is (BECount+1)*StoreSize
@@ -844,7 +844,7 @@ static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
// operand in the store. A store to &A[i] of 100 will always return may-alias
// with a store of &A[100]; we need StoreLoc to be "A" with size of 100,
// which will then no-alias a store to &A[100].
- AliasAnalysis::Location StoreLoc(Ptr, AccessSize);
+ MemoryLocation StoreLoc(Ptr, AccessSize);
for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
++BI)
diff --git a/lib/Transforms/Scalar/LoopInstSimplify.cpp b/lib/Transforms/Scalar/LoopInstSimplify.cpp
index e12502654751..4c40f249ce1d 100644
--- a/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -52,7 +52,7 @@ namespace {
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
};
-}
+} // namespace
char LoopInstSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(LoopInstSimplify, "loop-instsimplify",
diff --git a/lib/Transforms/Scalar/LoopInterchange.cpp b/lib/Transforms/Scalar/LoopInterchange.cpp
index f584018299d1..25546553fd4d 100644
--- a/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -598,8 +598,8 @@ struct LoopInterchange : public FunctionPass {
bool LoopInterchangeLegality::areAllUsesReductions(Instruction *Ins, Loop *L) {
return !std::any_of(Ins->user_begin(), Ins->user_end(), [=](User *U) -> bool {
PHINode *UserIns = dyn_cast<PHINode>(U);
- ReductionDescriptor RD;
- return !UserIns || !ReductionDescriptor::isReductionPHI(UserIns, L, RD);
+ RecurrenceDescriptor RD;
+ return !UserIns || !RecurrenceDescriptor::isReductionPHI(UserIns, L, RD);
});
}
@@ -697,12 +697,12 @@ bool LoopInterchangeLegality::findInductionAndReductions(
if (!L->getLoopLatch() || !L->getLoopPredecessor())
return false;
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
- ReductionDescriptor RD;
+ RecurrenceDescriptor RD;
PHINode *PHI = cast<PHINode>(I);
ConstantInt *StepValue = nullptr;
if (isInductionPHI(PHI, SE, StepValue))
Inductions.push_back(PHI);
- else if (ReductionDescriptor::isReductionPHI(PHI, L, RD))
+ else if (RecurrenceDescriptor::isReductionPHI(PHI, L, RD))
Reductions.push_back(PHI);
else {
DEBUG(
diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp
index ed103e6b8ed6..f6db9b114e3f 100644
--- a/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -438,7 +438,7 @@ namespace {
bool reroll(Instruction *IV, Loop *L, BasicBlock *Header, const SCEV *IterCount,
ReductionTracker &Reductions);
};
-}
+} // namespace
char LoopReroll::ID = 0;
INITIALIZE_PASS_BEGIN(LoopReroll, "loop-reroll", "Reroll loops", false, false)
diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp
index a675e1289baf..2ba70ad1f1a7 100644
--- a/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/lib/Transforms/Scalar/LoopRotation.cpp
@@ -79,7 +79,7 @@ namespace {
AssumptionCache *AC;
DominatorTree *DT;
};
-}
+} // namespace
char LoopRotate::ID = 0;
INITIALIZE_PASS_BEGIN(LoopRotate, "loop-rotate", "Rotate Loops", false, false)
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 4b59f3d2f6cc..ee7248691992 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -116,7 +116,7 @@ public:
void dump() const;
};
-}
+} // namespace
void RegSortData::print(raw_ostream &OS) const {
OS << "[NumUses=" << UsedByIndices.count() << ']';
@@ -157,7 +157,7 @@ public:
const_iterator end() const { return RegSequence.end(); }
};
-}
+} // namespace
void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
@@ -281,7 +281,7 @@ struct Formula {
void dump() const;
};
-}
+} // namespace
/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
@@ -903,7 +903,7 @@ private:
SmallPtrSetImpl<const SCEV *> *LoserRegs);
};
-}
+} // namespace
/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
@@ -1102,7 +1102,7 @@ struct LSRFixup {
void dump() const;
};
-}
+} // namespace
LSRFixup::LSRFixup()
: UserInst(nullptr), OperandValToReplace(nullptr), LUIdx(~size_t(0)),
@@ -1252,7 +1252,7 @@ public:
void dump() const;
};
-}
+} // namespace
/// HasFormula - Test whether this use as a formula which has the same
/// registers as the given formula.
@@ -1791,7 +1791,7 @@ public:
void dump() const;
};
-}
+} // namespace
/// OptimizeShadowIV - If IV is used in a int-to-float cast
/// inside the loop then try to eliminate the cast operation.
@@ -3644,7 +3644,7 @@ struct WorkItem {
void dump() const;
};
-}
+} // namespace
void WorkItem::print(raw_ostream &OS) const {
OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
@@ -4949,7 +4949,7 @@ private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
-}
+} // namespace
char LoopStrengthReduce::ID = 0;
INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 4ccbfc953e0c..d702dc0b4ee9 100644
--- a/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -229,7 +229,7 @@ namespace {
unsigned DynamicCostSavingsDiscount,
uint64_t UnrolledCost, uint64_t RolledDynamicCost);
};
-}
+} // namespace
char LoopUnroll::ID = 0;
INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
@@ -455,13 +455,15 @@ struct EstimatedUnrollCost {
///
/// Complete loop unrolling can make some loads constant, and we need to know
/// if that would expose any further optimization opportunities. This routine
-/// estimates this optimization. It assigns computed number of instructions,
-/// that potentially might be optimized away, to
-/// NumberOfOptimizedInstructions, and total number of instructions to
-/// UnrolledLoopSize (not counting blocks that won't be reached, if we were
-/// able to compute the condition).
-/// \returns false if we can't analyze the loop, or if we discovered that
-/// unrolling won't give anything. Otherwise, returns true.
+/// estimates this optimization. It computes the cost of the unrolled loop
+/// (UnrolledCost) and the dynamic cost of the original loop
+/// (RolledDynamicCost). By dynamic cost we mean that we do not count the
+/// costs of blocks that are known not to be executed (i.e. if we have a
+/// branch in the loop and we know that at the given iteration its condition
+/// resolves to true, we do not add the cost of the 'false' block).
+/// \returns an Optional value holding the RolledDynamicCost and UnrolledCost.
+/// If the analysis failed (no benefit is expected from unrolling, or the loop
+/// is too big to analyze), the returned value is None.
Optional<EstimatedUnrollCost>
analyzeLoopUnrollCost(const Loop *L, unsigned TripCount, ScalarEvolution &SE,
const TargetTransformInfo &TTI,
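
Given the Optional-based contract documented above, a caller consumes the analysis roughly as in this standalone illustration. This is a hedged sketch: CostPair and analyze are placeholder stand-ins, not the declarations in LoopUnrollPass.cpp, whose remaining parameters are elided by the hunk.

#include "llvm/ADT/Optional.h"
using namespace llvm;

// Placeholder analogue of EstimatedUnrollCost.
struct CostPair { unsigned UnrolledCost, RolledDynamicCost; };

// Placeholder analysis: either yields both costs or None.
static Optional<CostPair> analyze(bool CanAnalyze) {
  if (!CanAnalyze)
    return None;           // analysis failed / no benefit expected
  return CostPair{10, 40}; // placeholder numbers
}

static bool profitable() {
  if (Optional<CostPair> Cost = analyze(true))
    return Cost->UnrolledCost < Cost->RolledDynamicCost;
  return false;
}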
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 988d2af3ea90..5bdc2ec88d4a 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -213,7 +213,7 @@ namespace {
BasicBlock **LoopExit = nullptr);
};
-}
+} // namespace
// Analyze the loop: check its size and determine whether it is possible to
// unswitch it. Returns true if we can unswitch this loop.
diff --git a/lib/Transforms/Scalar/LowerAtomic.cpp b/lib/Transforms/Scalar/LowerAtomic.cpp
index 3314e1ed41ab..b8b35d4249f0 100644
--- a/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -138,7 +138,7 @@ namespace {
return Changed;
}
};
-}
+} // namespace
char LowerAtomic::ID = 0;
INITIALIZE_PASS(LowerAtomic, "loweratomic",
diff --git a/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp b/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
index 0c47cbd5bfda..b845c038e67e 100644
--- a/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
+++ b/lib/Transforms/Scalar/LowerExpectIntrinsic.cpp
@@ -181,7 +181,7 @@ public:
bool runOnFunction(Function &F) override { return lowerExpectIntrinsic(F); }
};
-}
+} // namespace
char LowerExpectIntrinsic::ID = 0;
INITIALIZE_PASS(LowerExpectIntrinsic, "lower-expect",
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 2bdf670f67e3..2c9f93513ae2 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -153,7 +153,7 @@ struct MemsetRange {
bool isProfitableToUseMemset(const DataLayout &DL) const;
};
-} // end anon namespace
+} // namespace
bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
// If we found more than 4 stores to merge or 16 bytes, use memset.
@@ -237,7 +237,7 @@ public:
};
-} // end anon namespace
+} // namespace
/// addRange - Add a new store to the MemsetRanges data structure. This adds a
@@ -337,7 +337,7 @@ namespace {
AU.addPreserved<MemoryDependenceAnalysis>();
}
- // Helper fuctions
+ // Helper functions
bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
bool processMemCpy(MemCpyInst *M);
@@ -355,7 +355,7 @@ namespace {
};
char MemCpyOpt::ID = 0;
-}
+} // namespace
// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }
@@ -510,7 +510,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// Check that nothing touches the dest of the "copy" between
// the call and the store.
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
- AliasAnalysis::Location StoreLoc = MemoryLocation::get(SI);
+ MemoryLocation StoreLoc = MemoryLocation::get(SI);
for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
E = C; I != E; --I) {
if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
@@ -997,7 +997,7 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
}
}
- AliasAnalysis::Location SrcLoc = MemoryLocation::getForSource(M);
+ MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
M, M->getParent());
@@ -1075,10 +1075,9 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
Value *ByValArg = CS.getArgument(ArgNo);
Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
- MemDepResult DepInfo =
- MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
- true, CS.getInstruction(),
- CS.getInstruction()->getParent());
+ MemDepResult DepInfo = MD->getPointerDependencyFrom(
+ MemoryLocation(ByValArg, ByValSize), true, CS.getInstruction(),
+ CS.getInstruction()->getParent());
if (!DepInfo.isClobber())
return false;
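
The reformatted MemCpyOpt call above queries memory dependence for the whole byval allocation. A minimal sketch of that query, assuming LLVM 3.7-era headers (depForByVal is a hypothetical helper):

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
using namespace llvm;

// Hypothetical helper: what does the byval argument's memory depend on,
// scanning backwards from the call site?
static MemDepResult depForByVal(MemoryDependenceAnalysis *MD,
                                Value *ByValArg, uint64_t ByValSize,
                                Instruction *Call) {
  return MD->getPointerDependencyFrom(MemoryLocation(ByValArg, ByValSize),
                                      /*isLoad=*/true, Call,
                                      Call->getParent());
}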
diff --git a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
index 776dfb4d487f..886b6f5b0a2c 100644
--- a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
+++ b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -144,9 +144,8 @@ private:
// Routines for sinking stores
StoreInst *canSinkFromBlock(BasicBlock *BB, StoreInst *SI);
PHINode *getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1);
- bool isStoreSinkBarrierInRange(const Instruction& Start,
- const Instruction& End,
- AliasAnalysis::Location Loc);
+ bool isStoreSinkBarrierInRange(const Instruction &Start,
+ const Instruction &End, MemoryLocation Loc);
bool sinkStore(BasicBlock *BB, StoreInst *SinkCand, StoreInst *ElseInst);
bool mergeStores(BasicBlock *BB);
// The mergeLoad/Store algorithms could have Size0 * Size1 complexity,
@@ -157,7 +156,7 @@ private:
};
char MergedLoadStoreMotion::ID = 0;
-}
+} // namespace
///
/// \brief createMergedLoadStoreMotionPass - The public interface to this file.
@@ -241,7 +240,7 @@ bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) {
bool MergedLoadStoreMotion::isLoadHoistBarrierInRange(const Instruction& Start,
const Instruction& End,
LoadInst* LI) {
- AliasAnalysis::Location Loc = MemoryLocation::get(LI);
+ MemoryLocation Loc = MemoryLocation::get(LI);
return AA->canInstructionRangeModRef(Start, End, Loc, AliasAnalysis::Mod);
}
@@ -266,8 +265,8 @@ LoadInst *MergedLoadStoreMotion::canHoistFromBlock(BasicBlock *BB1,
LoadInst *Load1 = dyn_cast<LoadInst>(Inst);
BasicBlock *BB0 = Load0->getParent();
- AliasAnalysis::Location Loc0 = MemoryLocation::get(Load0);
- AliasAnalysis::Location Loc1 = MemoryLocation::get(Load1);
+ MemoryLocation Loc0 = MemoryLocation::get(Load0);
+ MemoryLocation Loc1 = MemoryLocation::get(Load1);
if (AA->isMustAlias(Loc0, Loc1) && Load0->isSameOperationAs(Load1) &&
!isLoadHoistBarrierInRange(BB1->front(), *Load1, Load1) &&
!isLoadHoistBarrierInRange(BB0->front(), *Load0, Load0)) {
@@ -400,10 +399,9 @@ bool MergedLoadStoreMotion::mergeLoads(BasicBlock *BB) {
/// happening it is considered a sink barrier.
///
-bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction& Start,
- const Instruction& End,
- AliasAnalysis::Location
- Loc) {
+bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction &Start,
+ const Instruction &End,
+ MemoryLocation Loc) {
return AA->canInstructionRangeModRef(Start, End, Loc, AliasAnalysis::ModRef);
}
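
For readers of the signature changes above, the barrier checks compose with must-alias queries as in this sketch, assuming LLVM 3.7-era headers (loadsMergeable is a hypothetical helper mirroring the hoist test in canHoistFromBlock):

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper: two loads are merge candidates when they access
// the same location the same way and nothing in either range writes it.
static bool loadsMergeable(AliasAnalysis *AA, LoadInst *Load0,
                           LoadInst *Load1, BasicBlock *BB0,
                           BasicBlock *BB1) {
  MemoryLocation Loc0 = MemoryLocation::get(Load0);
  MemoryLocation Loc1 = MemoryLocation::get(Load1);
  return AA->isMustAlias(Loc0, Loc1) &&
         Load0->isSameOperationAs(Load1) &&
         !AA->canInstructionRangeModRef(BB1->front(), *Load1, Loc1,
                                        AliasAnalysis::Mod) &&
         !AA->canInstructionRangeModRef(BB0->front(), *Load0, Loc0,
                                        AliasAnalysis::Mod);
}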
@@ -425,8 +423,8 @@ StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1,
StoreInst *Store1 = cast<StoreInst>(Inst);
- AliasAnalysis::Location Loc0 = MemoryLocation::get(Store0);
- AliasAnalysis::Location Loc1 = MemoryLocation::get(Store1);
+ MemoryLocation Loc0 = MemoryLocation::get(Store0);
+ MemoryLocation Loc1 = MemoryLocation::get(Store1);
if (AA->isMustAlias(Loc0, Loc1) && Store0->isSameOperationAs(Store1) &&
!isStoreSinkBarrierInRange(*(std::next(BasicBlock::iterator(Store1))),
BB1->back(), Loc1) &&
diff --git a/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp b/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
index 31d7df39c781..5423499723f7 100644
--- a/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
+++ b/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
@@ -46,7 +46,7 @@ namespace {
};
char PartiallyInlineLibCalls::ID = 0;
-}
+} // namespace
INITIALIZE_PASS(PartiallyInlineLibCalls, "partially-inline-libcalls",
"Partially inline calls to library functions", false, false)
diff --git a/lib/Transforms/Scalar/PlaceSafepoints.cpp b/lib/Transforms/Scalar/PlaceSafepoints.cpp
index 9ecaf102574a..670dcd24f75c 100644
--- a/lib/Transforms/Scalar/PlaceSafepoints.cpp
+++ b/lib/Transforms/Scalar/PlaceSafepoints.cpp
@@ -160,7 +160,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass {
AU.setPreservesAll();
}
};
-}
+} // namespace
static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
@@ -181,7 +181,7 @@ struct PlaceSafepoints : public FunctionPass {
// if that was worth doing
}
};
-}
+} // namespace
// Insert a safepoint poll immediately before the given instruction. Does
// not handle the parsability of state at the runtime call, that's the
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index 6c66b58729e9..9842fd7bb6c7 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -154,7 +154,7 @@ namespace {
unsigned SymbolicRank;
bool isOr;
};
-}
+} // namespace
namespace {
class Reassociate : public FunctionPass {
@@ -197,7 +197,7 @@ namespace {
void OptimizeInst(Instruction *I);
Instruction *canonicalizeNegConstExpr(Instruction *I);
};
-}
+} // namespace
XorOpnd::XorOpnd(Value *V) {
assert(!isa<ConstantInt>(V) && "No ConstantInt");
diff --git a/lib/Transforms/Scalar/Reg2Mem.cpp b/lib/Transforms/Scalar/Reg2Mem.cpp
index 1b46727c17bb..2ff56e67c9c6 100644
--- a/lib/Transforms/Scalar/Reg2Mem.cpp
+++ b/lib/Transforms/Scalar/Reg2Mem.cpp
@@ -58,7 +58,7 @@ namespace {
bool runOnFunction(Function &F) override;
};
-}
+} // namespace
char RegToMem::ID = 0;
INITIALIZE_PASS_BEGIN(RegToMem, "reg2mem", "Demote all values to stack slots",
diff --git a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index 6f6ba72c6e6f..c15bc1bd7eca 100644
--- a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -183,7 +183,7 @@ struct PartiallyConstructedSafepointRecord {
/// Maps a rematerialized copy to its original value.
RematerializedValueMapTy RematerializedValues;
};
-}
+} // namespace
/// Compute the live-in set for every basic block in the function
static void computeLiveInValues(DominatorTree &DT, Function &F,
@@ -646,7 +646,7 @@ private:
llvm_unreachable("only three states!");
}
};
-}
+} // namespace
/// For a given value or instruction, figure out what base ptr it's derived
/// from. For gc objects, this is simply itself. On success, returns a value
/// which is the base pointer. (This is reliable and can be used for
@@ -1659,17 +1659,10 @@ static void relocationViaAlloca(
/// vector. Doing so has the effect of changing the output of a couple of
/// tests in ways which make them less useful in testing fused safepoints.
template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) {
- DenseSet<T> Seen;
- SmallVector<T, 128> TempVec;
- TempVec.reserve(Vec.size());
- for (auto Element : Vec)
- TempVec.push_back(Element);
- Vec.clear();
- for (auto V : TempVec) {
- if (Seen.insert(V).second) {
- Vec.push_back(V);
- }
- }
+ SmallSet<T, 8> Seen;
+ Vec.erase(std::remove_if(Vec.begin(), Vec.end(), [&](const T &V) {
+ return !Seen.insert(V).second;
+ }), Vec.end());
}
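
The rewritten unique_unsorted keeps the first occurrence of each element while preserving relative order, via the erase/remove_if idiom. A standalone standard-library analogue (std::unordered_set in place of llvm::SmallSet, std::vector in place of SmallVectorImpl; assumes T is hashable):

#include <algorithm>
#include <unordered_set>
#include <vector>

// Drop every element already seen earlier in the vector; the relative
// order of the survivors is unchanged.
template <typename T> static void unique_unsorted(std::vector<T> &Vec) {
  std::unordered_set<T> Seen;
  Vec.erase(std::remove_if(Vec.begin(), Vec.end(),
                           [&](const T &V) { return !Seen.insert(V).second; }),
            Vec.end());
}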
/// Insert holders so that each Value is obviously live through the entire
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 056dd11b5ab3..f38b2b1dbf96 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -127,7 +127,7 @@ typedef llvm::IRBuilder<true, ConstantFolder, IRBuilderPrefixedInserter<true>>
typedef llvm::IRBuilder<false, ConstantFolder, IRBuilderPrefixedInserter<false>>
IRBuilderTy;
#endif
-}
+} // namespace
namespace {
/// \brief A used slice of an alloca.
@@ -595,7 +595,7 @@ private:
/// the alloca.
SmallVector<Use *, 8> DeadOperands;
};
-}
+} // namespace
static Value *foldSelectInst(SelectInst &SI) {
// If the condition being selected on is a constant or the same value is
@@ -1173,7 +1173,7 @@ public:
}
}
};
-} // end anon namespace
+} // namespace
namespace {
/// \brief An optimization pass providing Scalar Replacement of Aggregates.
@@ -1268,7 +1268,7 @@ private:
void deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
bool promoteAllocas(Function &F);
};
-}
+} // namespace
char SROA::ID = 0;
@@ -3119,7 +3119,7 @@ private:
return true;
}
};
-}
+} // namespace
namespace {
/// \brief Visitor to rewrite aggregate loads and stores as scalar.
@@ -3327,7 +3327,7 @@ private:
return false;
}
};
-}
+} // namespace
/// \brief Strip aggregate type wrapping.
///
diff --git a/lib/Transforms/Scalar/SampleProfile.cpp b/lib/Transforms/Scalar/SampleProfile.cpp
index 3480cd499127..69e3a67aa8c1 100644
--- a/lib/Transforms/Scalar/SampleProfile.cpp
+++ b/lib/Transforms/Scalar/SampleProfile.cpp
@@ -174,7 +174,7 @@ protected:
/// \brief Flag indicating whether the profile input loaded successfully.
bool ProfileIsValid;
};
-}
+} // namespace
/// \brief Print the weight of edge \p E on stream \p OS.
///
@@ -282,7 +282,7 @@ bool SampleProfileLoader::computeBlockWeights(Function &F) {
/// \brief Find equivalence classes for the given block.
///
/// This finds all the blocks that are guaranteed to execute the same
-/// number of times as \p BB1. To do this, it traverses all the the
+/// number of times as \p BB1. To do this, it traverses all the
/// descendants of \p BB1 in the dominator or post-dominator tree.
///
/// A block BB2 will be in the same equivalence class as \p BB1 if
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index d955da7ce75d..e42c3daab8d7 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -221,7 +221,7 @@ namespace {
}
};
-}
+} // namespace
char SROA_DT::ID = 0;
char SROA_SSAUp::ID = 0;
@@ -1123,7 +1123,7 @@ public:
}
}
};
-} // end anon namespace
+} // namespace
/// isSafeSelectToSpeculate - Select instructions that use an alloca and are
/// subsequently loaded can be rewritten to load both input pointers and then
diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index f0e3ffdb95ac..0733daf40f39 100644
--- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -220,7 +220,7 @@ struct CFGSimplifyPass : public FunctionPass {
AU.addRequired<TargetTransformInfoWrapperPass>();
}
};
-}
+} // namespace
char CFGSimplifyPass::ID = 0;
INITIALIZE_PASS_BEGIN(CFGSimplifyPass, "simplifycfg", "Simplify the CFG", false,
diff --git a/lib/Transforms/Scalar/Sink.cpp b/lib/Transforms/Scalar/Sink.cpp
index 078c6a921a08..f49f4eaaedcb 100644
--- a/lib/Transforms/Scalar/Sink.cpp
+++ b/lib/Transforms/Scalar/Sink.cpp
@@ -163,7 +163,7 @@ static bool isSafeToMove(Instruction *Inst, AliasAnalysis *AA,
}
if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
- AliasAnalysis::Location Loc = MemoryLocation::get(L);
+ MemoryLocation Loc = MemoryLocation::get(L);
for (Instruction *S : Stores)
if (AA->getModRefInfo(S, Loc) & AliasAnalysis::Mod)
return false;
diff --git a/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
index 453503ab61da..f32769c24110 100644
--- a/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -265,8 +265,10 @@ static bool isGEPFoldable(GetElementPtrInst *GEP,
BaseOffset += DL->getStructLayout(STy)->getElementOffset(Field);
}
}
+
+ unsigned AddrSpace = GEP->getPointerAddressSpace();
return TTI->isLegalAddressingMode(GEP->getType()->getElementType(), BaseGV,
- BaseOffset, HasBaseReg, Scale);
+ BaseOffset, HasBaseReg, Scale, AddrSpace);
}
// Returns whether (Base + Index * Stride) can be folded to an addressing mode.
@@ -630,6 +632,15 @@ void StraightLineStrengthReduce::rewriteCandidateWithBasis(
// trivially dead.
RecursivelyDeleteTriviallyDeadInstructions(Bump);
} else {
+ // It's tempting to preserve nsw on Bump and/or Reduced. However, it's
+ // usually unsound, e.g.,
+ //
+ // X = (-2 +nsw 1) *nsw INT_MAX
+ // Y = (-2 +nsw 3) *nsw INT_MAX
+ // =>
+ // Y = X + 2 * INT_MAX
+ //
+ // Neither + nor * in the resulting expression is nsw.
Reduced = Builder.CreateAdd(Basis.Ins, Bump);
}
break;
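
The nsw caveat in the new comment can be checked with plain 32-/64-bit arithmetic: the bump 2 * INT_MAX overflows a signed 32-bit value even though both original expressions fit. A standalone check:

#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  const int64_t IntMax = std::numeric_limits<int32_t>::max();
  int64_t X = (-2 + 1) * IntMax; // -INT_MAX, fits in 32 bits
  int64_t Y = (-2 + 3) * IntMax; //  INT_MAX, fits in 32 bits
  int64_t Bump = 2 * IntMax;     //  2*INT_MAX, does NOT fit in 32 bits
  std::printf("X=%lld Y=%lld X+Bump=%lld (Bump overflows int32)\n",
              (long long)X, (long long)Y, (long long)(X + Bump));
}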
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 9eef1327c3f6..d23f5153c188 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -120,7 +120,7 @@ namespace {
bool CanMoveAboveCall(Instruction *I, CallInst *CI);
Value *CanTransformAccumulatorRecursion(Instruction *I, CallInst *CI);
};
-}
+} // namespace
char TailCallElim::ID = 0;
INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim",
@@ -158,6 +158,9 @@ bool TailCallElim::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
+ if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
+ return false;
+
bool AllCallsAreTailCalls = false;
bool Modified = markTails(F, AllCallsAreTailCalls);
if (AllCallsAreTailCalls)
@@ -243,7 +246,7 @@ struct AllocaDerivedValueTracker {
SmallPtrSet<Instruction *, 32> AllocaUsers;
SmallPtrSet<Instruction *, 32> EscapePoints;
};
-}
+} // namespace
bool TailCallElim::markTails(Function &F, bool &AllCallsAreTailCalls) {
if (F.callsFunctionThatReturnsTwice())
diff --git a/lib/Transforms/Utils/ASanStackFrameLayout.cpp b/lib/Transforms/Utils/ASanStackFrameLayout.cpp
index 03c3a80170a3..72cdfa464a3b 100644
--- a/lib/Transforms/Utils/ASanStackFrameLayout.cpp
+++ b/lib/Transforms/Utils/ASanStackFrameLayout.cpp
@@ -107,4 +107,4 @@ ComputeASanStackFrameLayout(SmallVectorImpl<ASanStackVariableDescription> &Vars,
assert(Layout->FrameSize / Granularity == Layout->ShadowBytes.size());
}
-} // llvm namespace
+} // namespace llvm
diff --git a/lib/Transforms/Utils/BasicBlockUtils.cpp b/lib/Transforms/Utils/BasicBlockUtils.cpp
index f3c801348a62..798376e95543 100644
--- a/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -486,11 +486,12 @@ BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
}
// Create new basic block, insert right before the original block.
- BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), BB->getName()+Suffix,
- BB->getParent(), BB);
+ BasicBlock *NewBB = BasicBlock::Create(
+ BB->getContext(), BB->getName() + Suffix, BB->getParent(), BB);
// The new block unconditionally branches to the old block.
BranchInst *BI = BranchInst::Create(BB, NewBB);
+ BI->setDebugLoc(BB->getFirstNonPHI()->getDebugLoc());
// Move the edges from Preds to point to NewBB instead of BB.
for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
@@ -553,6 +554,7 @@ void llvm::SplitLandingPadPredecessors(BasicBlock *OrigBB,
// The new block unconditionally branches to the old block.
BranchInst *BI1 = BranchInst::Create(OrigBB, NewBB1);
+ BI1->setDebugLoc(OrigBB->getFirstNonPHI()->getDebugLoc());
// Move the edges from Preds to point to NewBB1 instead of OrigBB.
for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
@@ -593,6 +595,7 @@ void llvm::SplitLandingPadPredecessors(BasicBlock *OrigBB,
// The new block unconditionally branches to the old block.
BranchInst *BI2 = BranchInst::Create(OrigBB, NewBB2);
+ BI2->setDebugLoc(OrigBB->getFirstNonPHI()->getDebugLoc());
// Move the remaining edges from OrigBB to point to NewBB2.
for (SmallVectorImpl<BasicBlock*>::iterator
diff --git a/lib/Transforms/Utils/BreakCriticalEdges.cpp b/lib/Transforms/Utils/BreakCriticalEdges.cpp
index 7e83c9eeceb7..362cd9bbee7b 100644
--- a/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -60,7 +60,7 @@ namespace {
AU.addPreservedID(LoopSimplifyID);
}
};
-}
+} // namespace
char BreakCriticalEdges::ID = 0;
INITIALIZE_PASS(BreakCriticalEdges, "break-crit-edges",
diff --git a/lib/Transforms/Utils/BypassSlowDivision.cpp b/lib/Transforms/Utils/BypassSlowDivision.cpp
index f2d5e0745035..0771b29b24fd 100644
--- a/lib/Transforms/Utils/BypassSlowDivision.cpp
+++ b/lib/Transforms/Utils/BypassSlowDivision.cpp
@@ -42,7 +42,7 @@ namespace {
DivPhiNodes(PHINode *InQuotient, PHINode *InRemainder)
: Quotient(InQuotient), Remainder(InRemainder) {}
};
-}
+} // namespace
namespace llvm {
template<>
@@ -69,7 +69,7 @@ namespace llvm {
};
typedef DenseMap<DivOpInfo, DivPhiNodes> DivCacheTy;
-}
+} // namespace llvm
// insertFastDiv - Substitutes the div/rem instruction with code that checks the
// value of the operands and uses a shorter-faster div/rem instruction when
diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp
index 4f8d1dfbe5df..e623445b284b 100644
--- a/lib/Transforms/Utils/CloneFunction.cpp
+++ b/lib/Transforms/Utils/CloneFunction.cpp
@@ -289,7 +289,7 @@ namespace {
BasicBlock::const_iterator StartingInst,
std::vector<const BasicBlock*> &ToClone);
};
-}
+} // namespace
/// The specified block is found to be reachable, clone it and
/// anything that it can reach.
diff --git a/lib/Transforms/Utils/CtorUtils.cpp b/lib/Transforms/Utils/CtorUtils.cpp
index dc95089cd2ca..4bbded8dc998 100644
--- a/lib/Transforms/Utils/CtorUtils.cpp
+++ b/lib/Transforms/Utils/CtorUtils.cpp
@@ -162,4 +162,4 @@ bool optimizeGlobalCtorsList(Module &M,
return true;
}
-} // End llvm namespace
+} // namespace llvm
diff --git a/lib/Transforms/Utils/FlattenCFG.cpp b/lib/Transforms/Utils/FlattenCFG.cpp
index 4eb3e3dd17d2..40a48c067907 100644
--- a/lib/Transforms/Utils/FlattenCFG.cpp
+++ b/lib/Transforms/Utils/FlattenCFG.cpp
@@ -46,7 +46,7 @@ public:
FlattenCFGOpt(AliasAnalysis *AA) : AA(AA) {}
bool run(BasicBlock *BB);
};
-}
+} // namespace
/// If \param [in] BB has more than one predecessor that is a conditional
/// branch, attempt to use parallel and/or for the branch condition. \returns
diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp
index ddeaff06d3c8..ea84e7c302d1 100644
--- a/lib/Transforms/Utils/InlineFunction.cpp
+++ b/lib/Transforms/Utils/InlineFunction.cpp
@@ -121,7 +121,7 @@ namespace {
}
}
};
-}
+} // namespace
/// Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
@@ -949,35 +949,23 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
}
// Get the personality function from the callee if it contains a landing pad.
- Value *CalleePersonality = nullptr;
- for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
- I != E; ++I)
- if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
- const BasicBlock *BB = II->getUnwindDest();
- const LandingPadInst *LP = BB->getLandingPadInst();
- CalleePersonality = LP->getPersonalityFn();
- break;
- }
+ Constant *CalledPersonality =
+ CalledFunc->hasPersonalityFn() ? CalledFunc->getPersonalityFn() : nullptr;
// Find the personality function used by the landing pads of the caller. If it
// exists, then check to see that it matches the personality function used in
// the callee.
- if (CalleePersonality) {
- for (Function::const_iterator I = Caller->begin(), E = Caller->end();
- I != E; ++I)
- if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
- const BasicBlock *BB = II->getUnwindDest();
- const LandingPadInst *LP = BB->getLandingPadInst();
-
- // If the personality functions match, then we can perform the
- // inlining. Otherwise, we can't inline.
- // TODO: This isn't 100% true. Some personality functions are proper
- // supersets of others and can be used in place of the other.
- if (LP->getPersonalityFn() != CalleePersonality)
- return false;
-
- break;
- }
+ Constant *CallerPersonality =
+ Caller->hasPersonalityFn() ? Caller->getPersonalityFn() : nullptr;
+ if (CalledPersonality) {
+ if (!CallerPersonality)
+ Caller->setPersonalityFn(CalledPersonality);
+ // If the personality functions match, then we can perform the
+ // inlining. Otherwise, we can't inline.
+ // TODO: This isn't 100% true. Some personality functions are proper
+ // supersets of others and can be used in place of the other.
+ else if (CalledPersonality != CallerPersonality)
+ return false;
}
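
The personality logic above relies on the then-new Function-level accessors rather than scanning invoke landing pads. A minimal sketch of the rule, assuming LLVM 3.7-era headers (personalitiesAllowInlining is a hypothetical helper):

#include "llvm/IR/Function.h"
using namespace llvm;

// Hypothetical helper: inlining is allowed when the callee has no
// personality, when the caller adopts the callee's personality, or when
// the two already match exactly.
static bool personalitiesAllowInlining(Function *Caller, Function *Callee) {
  if (!Callee->hasPersonalityFn())
    return true; // nothing to reconcile
  Constant *CalleePers = Callee->getPersonalityFn();
  if (!Caller->hasPersonalityFn()) {
    Caller->setPersonalityFn(CalleePers); // caller adopts it
    return true;
  }
  return Caller->getPersonalityFn() == CalleePers;
}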
// Get an iterator to the last basic block in the function, which will have
diff --git a/lib/Transforms/Utils/InstructionNamer.cpp b/lib/Transforms/Utils/InstructionNamer.cpp
index da890a297005..c9bec9a9fa79 100644
--- a/lib/Transforms/Utils/InstructionNamer.cpp
+++ b/lib/Transforms/Utils/InstructionNamer.cpp
@@ -50,7 +50,7 @@ namespace {
};
char InstNamer::ID = 0;
-}
+} // namespace
INITIALIZE_PASS(InstNamer, "instnamer",
"Assign names to anonymous instructions", false, false)
diff --git a/lib/Transforms/Utils/LCSSA.cpp b/lib/Transforms/Utils/LCSSA.cpp
index 9d40b6989d6e..fcc79864219e 100644
--- a/lib/Transforms/Utils/LCSSA.cpp
+++ b/lib/Transforms/Utils/LCSSA.cpp
@@ -300,7 +300,7 @@ struct LCSSA : public FunctionPass {
AU.addPreserved<ScalarEvolution>();
}
};
-}
+} // namespace
char LCSSA::ID = 0;
INITIALIZE_PASS_BEGIN(LCSSA, "lcssa", "Loop-Closed SSA Form Pass", false, false)
diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp
index 70c77b06d62e..56085579b61c 100644
--- a/lib/Transforms/Utils/Local.cpp
+++ b/lib/Transforms/Utils/Local.cpp
@@ -14,6 +14,8 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
@@ -828,64 +830,45 @@ bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
/// orders them so it usually won't matter.
///
bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
- bool Changed = false;
-
// This implementation doesn't currently consider undef operands
// specially. Theoretically, two phis which are identical except for
// one having an undef where the other doesn't could be collapsed.
- // Map from PHI hash values to PHI nodes. If multiple PHIs have
- // the same hash value, the element is the first PHI in the
- // linked list in CollisionMap.
- DenseMap<uintptr_t, PHINode *> HashMap;
+ struct PHIDenseMapInfo {
+ static PHINode *getEmptyKey() {
+ return DenseMapInfo<PHINode *>::getEmptyKey();
+ }
+ static PHINode *getTombstoneKey() {
+ return DenseMapInfo<PHINode *>::getTombstoneKey();
+ }
+ static unsigned getHashValue(PHINode *PN) {
+ // Compute a hash value on the operands. Instcombine will likely have
+ // sorted them, which helps expose duplicates, but we have to check all
+ // the operands to be safe in case instcombine hasn't run.
+ return static_cast<unsigned>(hash_combine(
+ hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
+ hash_combine_range(PN->block_begin(), PN->block_end())));
+ }
+ static bool isEqual(PHINode *LHS, PHINode *RHS) {
+ if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
+ RHS == getEmptyKey() || RHS == getTombstoneKey())
+ return LHS == RHS;
+ return LHS->isIdenticalTo(RHS);
+ }
+ };
- // Maintain linked lists of PHI nodes with common hash values.
- DenseMap<PHINode *, PHINode *> CollisionMap;
+ // Set of unique PHINodes.
+ DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
// Examine each PHI.
- for (BasicBlock::iterator I = BB->begin();
- PHINode *PN = dyn_cast<PHINode>(I++); ) {
- // Compute a hash value on the operands. Instcombine will likely have sorted
- // them, which helps expose duplicates, but we have to check all the
- // operands to be safe in case instcombine hasn't run.
- uintptr_t Hash = 0;
- // This hash algorithm is quite weak as hash functions go, but it seems
- // to do a good enough job for this particular purpose, and is very quick.
- for (User::op_iterator I = PN->op_begin(), E = PN->op_end(); I != E; ++I) {
- Hash ^= reinterpret_cast<uintptr_t>(static_cast<Value *>(*I));
- Hash = (Hash << 7) | (Hash >> (sizeof(uintptr_t) * CHAR_BIT - 7));
- }
- for (PHINode::block_iterator I = PN->block_begin(), E = PN->block_end();
- I != E; ++I) {
- Hash ^= reinterpret_cast<uintptr_t>(static_cast<BasicBlock *>(*I));
- Hash = (Hash << 7) | (Hash >> (sizeof(uintptr_t) * CHAR_BIT - 7));
- }
- // Avoid colliding with the DenseMap sentinels ~0 and ~0-1.
- Hash >>= 1;
- // If we've never seen this hash value before, it's a unique PHI.
- std::pair<DenseMap<uintptr_t, PHINode *>::iterator, bool> Pair =
- HashMap.insert(std::make_pair(Hash, PN));
- if (Pair.second) continue;
- // Otherwise it's either a duplicate or a hash collision.
- for (PHINode *OtherPN = Pair.first->second; ; ) {
- if (OtherPN->isIdenticalTo(PN)) {
- // A duplicate. Replace this PHI with its duplicate.
- PN->replaceAllUsesWith(OtherPN);
- PN->eraseFromParent();
- Changed = true;
- break;
- }
- // A non-duplicate hash collision.
- DenseMap<PHINode *, PHINode *>::iterator I = CollisionMap.find(OtherPN);
- if (I == CollisionMap.end()) {
- // Set this PHI to be the head of the linked list of colliding PHIs.
- PHINode *Old = Pair.first->second;
- Pair.first->second = PN;
- CollisionMap[PN] = Old;
- break;
- }
- // Proceed to the next PHI in the list.
- OtherPN = I->second;
+ bool Changed = false;
+ for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
+ auto Inserted = PHISet.insert(PN);
+ if (!Inserted.second) {
+ // A duplicate. Replace this PHI with its duplicate.
+ PN->replaceAllUsesWith(*Inserted.first);
+ PN->eraseFromParent();
+ Changed = true;
}
}
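[Editor's note: the patch collapses the hand-rolled rotate-xor hash, HashMap, and CollisionMap into one DenseSet keyed by structural identity. A standalone analogue using std::unordered_set, with an integer-list Node standing in for PHINode; hypothetical types, not the LLVM ones.]

#include <cassert>
#include <cstddef>
#include <functional>
#include <unordered_set>
#include <vector>

// Stand-in for a PHI node: identity is defined by its operand list.
struct Node { std::vector<int> ops; };

struct NodeHash {
  std::size_t operator()(const Node *N) const {
    std::size_t H = 0;
    for (int Op : N->ops)   // combine operand hashes, like hash_combine_range
      H = H * 31 + std::hash<int>()(Op);
    return H;
  }
};
struct NodeEq {
  bool operator()(const Node *A, const Node *B) const {
    return A->ops == B->ops;   // structural identity, like isIdenticalTo
  }
};

// Returns the surviving representative for N, inserting it if it is new.
// Duplicates hash to the same bucket and compare equal, so one set
// subsumes the old HashMap + CollisionMap pair.
const Node *dedup(std::unordered_set<const Node *, NodeHash, NodeEq> &Set,
                  const Node *N) {
  return *Set.insert(N).first;
}

int main() {
  Node A{{1, 2}}, B{{1, 2}}, C{{3}};
  std::unordered_set<const Node *, NodeHash, NodeEq> Set;
  assert(dedup(Set, &A) == &A);
  assert(dedup(Set, &B) == &A);  // B is a duplicate of A
  assert(dedup(Set, &C) == &C);
}

The set handles hash collisions internally, so the explicit linked lists of colliding PHIs disappear along with the weak rotate-xor hash.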
@@ -1173,10 +1156,11 @@ static void changeToCall(InvokeInst *II) {
II->eraseFromParent();
}
-static bool markAliveBlocks(BasicBlock *BB,
+static bool markAliveBlocks(Function &F,
SmallPtrSetImpl<BasicBlock*> &Reachable) {
SmallVector<BasicBlock*, 128> Worklist;
+ BasicBlock *BB = F.begin();
Worklist.push_back(BB);
Reachable.insert(BB);
bool Changed = false;
@@ -1247,7 +1231,7 @@ static bool markAliveBlocks(BasicBlock *BB,
if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
changeToUnreachable(II, true);
Changed = true;
- } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(II)) {
+ } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
if (II->use_empty() && II->onlyReadsMemory()) {
// jump to the normal destination branch.
BranchInst::Create(II->getNormalDest(), II);
@@ -1272,7 +1256,7 @@ static bool markAliveBlocks(BasicBlock *BB,
/// otherwise.
bool llvm::removeUnreachableBlocks(Function &F) {
SmallPtrSet<BasicBlock*, 128> Reachable;
- bool Changed = markAliveBlocks(F.begin(), Reachable);
+ bool Changed = markAliveBlocks(F, Reachable);
// If there are unreachable blocks in the CFG...
if (Reachable.size() == F.size())
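[Editor's note: markAliveBlocks is a plain worklist march from the entry block; the signature change only moves the F.begin() lookup inside. A self-contained sketch of the traversal over an adjacency-list CFG, with indices standing in for BasicBlocks.]

#include <cassert>
#include <cstddef>
#include <unordered_set>
#include <vector>

// Worklist march over a CFG given as an adjacency list; block 0 is the
// entry, as the patch now derives the start block from the function itself.
std::unordered_set<std::size_t>
markAlive(const std::vector<std::vector<std::size_t>> &Succs) {
  std::unordered_set<std::size_t> Reachable;
  std::vector<std::size_t> Worklist{0};
  Reachable.insert(0);
  while (!Worklist.empty()) {
    std::size_t BB = Worklist.back();
    Worklist.pop_back();
    for (std::size_t S : Succs[BB])
      if (Reachable.insert(S).second)  // enqueue on first visit only
        Worklist.push_back(S);
  }
  return Reachable;
}

int main() {
  // 0 -> 1 -> 2; block 3 is unreachable.
  std::vector<std::vector<std::size_t>> Succs = {{1}, {2}, {}, {2}};
  auto R = markAlive(Succs);
  assert(R.size() == 3 && !R.count(3));
}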
diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp
index 90dfabaeb356..8b0afa69d974 100644
--- a/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/lib/Transforms/Utils/LoopSimplify.cpp
@@ -144,8 +144,6 @@ BasicBlock *llvm::InsertPreheaderForLoop(Loop *L, Pass *PP) {
PreheaderBB = SplitBlockPredecessors(Header, OutsideBlocks, ".preheader",
AA, DT, LI, PreserveLCSSA);
- PreheaderBB->getTerminator()->setDebugLoc(
- Header->getFirstNonPHI()->getDebugLoc());
DEBUG(dbgs() << "LoopSimplify: Creating pre-header "
<< PreheaderBB->getName() << "\n");
@@ -778,7 +776,7 @@ namespace {
/// verifyAnalysis() - Verify LoopSimplifyForm's guarantees.
void verifyAnalysis() const override;
};
-}
+} // namespace
char LoopSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(LoopSimplify, "loop-simplify",
diff --git a/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index d1774dfa10d9..919b45d3c7b1 100644
--- a/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -113,6 +113,7 @@ static void ConnectProlog(Loop *L, Value *BECount, unsigned Count,
// Create a branch around the original loop, which is taken if there are no
// iterations remaining to be executed after running the prologue.
Instruction *InsertPt = PrologEnd->getTerminator();
+ IRBuilder<> B(InsertPt);
assert(Count != 0 && "nonsensical Count!");
@@ -120,9 +121,8 @@ static void ConnectProlog(Loop *L, Value *BECount, unsigned Count,
// (since Count is a power of 2). This means %xtraiter is (BECount + 1)
// and all of the iterations of this loop were executed by the prologue. Note
// that if BECount <u (Count - 1) then (BECount + 1) cannot unsigned-overflow.
- Instruction *BrLoopExit =
- new ICmpInst(InsertPt, ICmpInst::ICMP_ULT, BECount,
- ConstantInt::get(BECount->getType(), Count - 1));
+ Value *BrLoopExit =
+ B.CreateICmpULT(BECount, ConstantInt::get(BECount->getType(), Count - 1));
BasicBlock *Exit = L->getUniqueExitBlock();
assert(Exit && "Loop must have a single exit block only");
// Split the exit to maintain loop canonicalization guarantees
@@ -130,7 +130,7 @@ static void ConnectProlog(Loop *L, Value *BECount, unsigned Count,
SplitBlockPredecessors(Exit, Preds, ".unr-lcssa", AA, DT, LI,
P->mustPreserveAnalysisID(LCSSAID));
// Add the branch to the exit block (around the unrolled loop)
- BranchInst::Create(Exit, NewPH, BrLoopExit, InsertPt);
+ B.CreateCondBr(BrLoopExit, Exit, NewPH);
InsertPt->eraseFromParent();
}
@@ -184,23 +184,22 @@ static void CloneLoopBlocks(Loop *L, Value *NewIter, const bool UnrollProlog,
VMap.erase((*BB)->getTerminator());
BasicBlock *FirstLoopBB = cast<BasicBlock>(VMap[Header]);
BranchInst *LatchBR = cast<BranchInst>(NewBB->getTerminator());
+ IRBuilder<> Builder(LatchBR);
if (UnrollProlog) {
- LatchBR->eraseFromParent();
- BranchInst::Create(InsertBot, NewBB);
+ Builder.CreateBr(InsertBot);
} else {
PHINode *NewIdx = PHINode::Create(NewIter->getType(), 2, "prol.iter",
FirstLoopBB->getFirstNonPHI());
- IRBuilder<> Builder(LatchBR);
Value *IdxSub =
Builder.CreateSub(NewIdx, ConstantInt::get(NewIdx->getType(), 1),
NewIdx->getName() + ".sub");
Value *IdxCmp =
Builder.CreateIsNotNull(IdxSub, NewIdx->getName() + ".cmp");
- BranchInst::Create(FirstLoopBB, InsertBot, IdxCmp, NewBB);
+ Builder.CreateCondBr(IdxCmp, FirstLoopBB, InsertBot);
NewIdx->addIncoming(NewIter, InsertTop);
NewIdx->addIncoming(IdxSub, NewBB);
- LatchBR->eraseFromParent();
}
+ LatchBR->eraseFromParent();
}
}
@@ -370,7 +369,7 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count,
// Branch to either the extra iterations or the cloned/unrolled loop
// We will fix up the true branch label when adding loop body copies
- BranchInst::Create(PEnd, PEnd, BranchVal, PreHeaderBR);
+ B.CreateCondBr(BranchVal, PEnd, PEnd);
assert(PreHeaderBR->isUnconditional() &&
PreHeaderBR->getSuccessor(0) == PEnd &&
"CFG edges in Preheader are not correct");
diff --git a/lib/Transforms/Utils/LoopUtils.cpp b/lib/Transforms/Utils/LoopUtils.cpp
index 5f25e6b2cb6f..5cbde94a98ed 100644
--- a/lib/Transforms/Utils/LoopUtils.cpp
+++ b/lib/Transforms/Utils/LoopUtils.cpp
@@ -26,17 +26,17 @@ using namespace llvm::PatternMatch;
#define DEBUG_TYPE "loop-utils"
-bool ReductionDescriptor::areAllUsesIn(Instruction *I,
- SmallPtrSetImpl<Instruction *> &Set) {
+bool RecurrenceDescriptor::areAllUsesIn(Instruction *I,
+ SmallPtrSetImpl<Instruction *> &Set) {
for (User::op_iterator Use = I->op_begin(), E = I->op_end(); Use != E; ++Use)
if (!Set.count(dyn_cast<Instruction>(*Use)))
return false;
return true;
}
-bool ReductionDescriptor::AddReductionVar(PHINode *Phi, ReductionKind Kind,
- Loop *TheLoop, bool HasFunNoNaNAttr,
- ReductionDescriptor &RedDes) {
+bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurrenceKind Kind,
+ Loop *TheLoop, bool HasFunNoNaNAttr,
+ RecurrenceDescriptor &RedDes) {
if (Phi->getNumIncomingValues() != 2)
return false;
@@ -66,7 +66,7 @@ bool ReductionDescriptor::AddReductionVar(PHINode *Phi, ReductionKind Kind,
// the number of instructions we saw from the recognized min/max pattern,
// to make sure we only see exactly the two instructions.
unsigned NumCmpSelectPatternInst = 0;
- ReductionInstDesc ReduxDesc(false, nullptr);
+ InstDesc ReduxDesc(false, nullptr);
SmallPtrSet<Instruction *, 8> VisitedInsts;
SmallVector<Instruction *, 8> Worklist;
@@ -111,8 +111,8 @@ bool ReductionDescriptor::AddReductionVar(PHINode *Phi, ReductionKind Kind,
return false;
// Any reduction instruction must be of one of the allowed kinds.
- ReduxDesc = isReductionInstr(Cur, Kind, ReduxDesc, HasFunNoNaNAttr);
- if (!ReduxDesc.isReduction())
+ ReduxDesc = isRecurrenceInstr(Cur, Kind, ReduxDesc, HasFunNoNaNAttr);
+ if (!ReduxDesc.isRecurrence())
return false;
// A reduction operation must only have one use of the reduction value.
@@ -164,7 +164,7 @@ bool ReductionDescriptor::AddReductionVar(PHINode *Phi, ReductionKind Kind,
// Process instructions only once (termination). Each reduction cycle
// value must only be used once, except by phi nodes and min/max
// reductions which are represented as a cmp followed by a select.
- ReductionInstDesc IgnoredVal(false, nullptr);
+ InstDesc IgnoredVal(false, nullptr);
if (VisitedInsts.insert(UI).second) {
if (isa<PHINode>(UI))
PHIs.push_back(UI);
@@ -173,7 +173,7 @@ bool ReductionDescriptor::AddReductionVar(PHINode *Phi, ReductionKind Kind,
} else if (!isa<PHINode>(UI) &&
((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
!isa<SelectInst>(UI)) ||
- !isMinMaxSelectCmpPattern(UI, IgnoredVal).isReduction()))
+ !isMinMaxSelectCmpPattern(UI, IgnoredVal).isRecurrence()))
return false;
// Remember that we completed the cycle.
@@ -197,11 +197,11 @@ bool ReductionDescriptor::AddReductionVar(PHINode *Phi, ReductionKind Kind,
// only have a single instruction with out-of-loop users.
// The ExitInstruction (the instruction which is allowed to have out-of-loop users)
- // is saved as part of the ReductionDescriptor.
+ // is saved as part of the RecurrenceDescriptor.
// Save the description of this reduction variable.
- ReductionDescriptor RD(RdxStart, ExitInstruction, Kind,
- ReduxDesc.getMinMaxKind());
+ RecurrenceDescriptor RD(RdxStart, ExitInstruction, Kind,
+ ReduxDesc.getMinMaxKind());
RedDes = RD;
@@ -210,9 +210,8 @@ bool ReductionDescriptor::AddReductionVar(PHINode *Phi, ReductionKind Kind,
/// Returns true if the instruction is a Select(ICmp(X, Y), X, Y) instruction
/// pattern corresponding to a min(X, Y) or max(X, Y).
-ReductionInstDesc
-ReductionDescriptor::isMinMaxSelectCmpPattern(Instruction *I,
- ReductionInstDesc &Prev) {
+RecurrenceDescriptor::InstDesc
+RecurrenceDescriptor::isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev) {
assert((isa<ICmpInst>(I) || isa<FCmpInst>(I) || isa<SelectInst>(I)) &&
"Expect a select instruction");
@@ -223,84 +222,83 @@ ReductionDescriptor::isMinMaxSelectCmpPattern(Instruction *I,
// select.
if ((Cmp = dyn_cast<ICmpInst>(I)) || (Cmp = dyn_cast<FCmpInst>(I))) {
if (!Cmp->hasOneUse() || !(Select = dyn_cast<SelectInst>(*I->user_begin())))
- return ReductionInstDesc(false, I);
- return ReductionInstDesc(Select, Prev.getMinMaxKind());
+ return InstDesc(false, I);
+ return InstDesc(Select, Prev.getMinMaxKind());
}
// Only handle single use cases for now.
if (!(Select = dyn_cast<SelectInst>(I)))
- return ReductionInstDesc(false, I);
+ return InstDesc(false, I);
if (!(Cmp = dyn_cast<ICmpInst>(I->getOperand(0))) &&
!(Cmp = dyn_cast<FCmpInst>(I->getOperand(0))))
- return ReductionInstDesc(false, I);
+ return InstDesc(false, I);
if (!Cmp->hasOneUse())
- return ReductionInstDesc(false, I);
+ return InstDesc(false, I);
Value *CmpLeft;
Value *CmpRight;
// Look for a min/max pattern.
if (m_UMin(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
- return ReductionInstDesc(Select, ReductionInstDesc::MRK_UIntMin);
+ return InstDesc(Select, MRK_UIntMin);
else if (m_UMax(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
- return ReductionInstDesc(Select, ReductionInstDesc::MRK_UIntMax);
+ return InstDesc(Select, MRK_UIntMax);
else if (m_SMax(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
- return ReductionInstDesc(Select, ReductionInstDesc::MRK_SIntMax);
+ return InstDesc(Select, MRK_SIntMax);
else if (m_SMin(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
- return ReductionInstDesc(Select, ReductionInstDesc::MRK_SIntMin);
+ return InstDesc(Select, MRK_SIntMin);
else if (m_OrdFMin(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
- return ReductionInstDesc(Select, ReductionInstDesc::MRK_FloatMin);
+ return InstDesc(Select, MRK_FloatMin);
else if (m_OrdFMax(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
- return ReductionInstDesc(Select, ReductionInstDesc::MRK_FloatMax);
+ return InstDesc(Select, MRK_FloatMax);
else if (m_UnordFMin(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
- return ReductionInstDesc(Select, ReductionInstDesc::MRK_FloatMin);
+ return InstDesc(Select, MRK_FloatMin);
else if (m_UnordFMax(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
- return ReductionInstDesc(Select, ReductionInstDesc::MRK_FloatMax);
+ return InstDesc(Select, MRK_FloatMax);
- return ReductionInstDesc(false, I);
+ return InstDesc(false, I);
}
-ReductionInstDesc ReductionDescriptor::isReductionInstr(Instruction *I,
- ReductionKind Kind,
- ReductionInstDesc &Prev,
- bool HasFunNoNaNAttr) {
+RecurrenceDescriptor::InstDesc
+RecurrenceDescriptor::isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
+ InstDesc &Prev, bool HasFunNoNaNAttr) {
bool FP = I->getType()->isFloatingPointTy();
bool FastMath = FP && I->hasUnsafeAlgebra();
switch (I->getOpcode()) {
default:
- return ReductionInstDesc(false, I);
+ return InstDesc(false, I);
case Instruction::PHI:
if (FP &&
(Kind != RK_FloatMult && Kind != RK_FloatAdd && Kind != RK_FloatMinMax))
- return ReductionInstDesc(false, I);
- return ReductionInstDesc(I, Prev.getMinMaxKind());
+ return InstDesc(false, I);
+ return InstDesc(I, Prev.getMinMaxKind());
case Instruction::Sub:
case Instruction::Add:
- return ReductionInstDesc(Kind == RK_IntegerAdd, I);
+ return InstDesc(Kind == RK_IntegerAdd, I);
case Instruction::Mul:
- return ReductionInstDesc(Kind == RK_IntegerMult, I);
+ return InstDesc(Kind == RK_IntegerMult, I);
case Instruction::And:
- return ReductionInstDesc(Kind == RK_IntegerAnd, I);
+ return InstDesc(Kind == RK_IntegerAnd, I);
case Instruction::Or:
- return ReductionInstDesc(Kind == RK_IntegerOr, I);
+ return InstDesc(Kind == RK_IntegerOr, I);
case Instruction::Xor:
- return ReductionInstDesc(Kind == RK_IntegerXor, I);
+ return InstDesc(Kind == RK_IntegerXor, I);
case Instruction::FMul:
- return ReductionInstDesc(Kind == RK_FloatMult && FastMath, I);
+ return InstDesc(Kind == RK_FloatMult && FastMath, I);
case Instruction::FSub:
case Instruction::FAdd:
- return ReductionInstDesc(Kind == RK_FloatAdd && FastMath, I);
+ return InstDesc(Kind == RK_FloatAdd && FastMath, I);
case Instruction::FCmp:
case Instruction::ICmp:
case Instruction::Select:
if (Kind != RK_IntegerMinMax &&
(!HasFunNoNaNAttr || Kind != RK_FloatMinMax))
- return ReductionInstDesc(false, I);
+ return InstDesc(false, I);
return isMinMaxSelectCmpPattern(I, Prev);
}
}
-bool ReductionDescriptor::hasMultipleUsesOf(
+bool RecurrenceDescriptor::hasMultipleUsesOf(
Instruction *I, SmallPtrSetImpl<Instruction *> &Insts) {
unsigned NumUses = 0;
for (User::op_iterator Use = I->op_begin(), E = I->op_end(); Use != E;
@@ -313,8 +311,8 @@ bool ReductionDescriptor::hasMultipleUsesOf(
return false;
}
-bool ReductionDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
- ReductionDescriptor &RedDes) {
+bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
+ RecurrenceDescriptor &RedDes) {
bool HasFunNoNaNAttr = false;
BasicBlock *Header = TheLoop->getHeader();
@@ -366,7 +364,8 @@ bool ReductionDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
/// This function returns the identity element (or neutral element) for
/// the operation K.
-Constant *ReductionDescriptor::getReductionIdentity(ReductionKind K, Type *Tp) {
+Constant *RecurrenceDescriptor::getRecurrenceIdentity(RecurrenceKind K,
+ Type *Tp) {
switch (K) {
case RK_IntegerXor:
case RK_IntegerAdd:
@@ -386,12 +385,12 @@ Constant *ReductionDescriptor::getReductionIdentity(ReductionKind K, Type *Tp) {
// Adding zero to a number does not change it.
return ConstantFP::get(Tp, 0.0L);
default:
- llvm_unreachable("Unknown reduction kind");
+ llvm_unreachable("Unknown recurrence kind");
}
}
-/// This function translates the reduction kind to an LLVM binary operator.
-unsigned ReductionDescriptor::getReductionBinOp(ReductionKind Kind) {
+/// This function translates the recurrence kind to an LLVM binary operator.
+unsigned RecurrenceDescriptor::getRecurrenceBinOp(RecurrenceKind Kind) {
switch (Kind) {
case RK_IntegerAdd:
return Instruction::Add;
@@ -412,41 +411,39 @@ unsigned ReductionDescriptor::getReductionBinOp(ReductionKind Kind) {
case RK_FloatMinMax:
return Instruction::FCmp;
default:
- llvm_unreachable("Unknown reduction operation");
+ llvm_unreachable("Unknown recurrence operation");
}
}
-Value *
-ReductionDescriptor::createMinMaxOp(IRBuilder<> &Builder,
- ReductionInstDesc::MinMaxReductionKind RK,
- Value *Left, Value *Right) {
+Value *RecurrenceDescriptor::createMinMaxOp(IRBuilder<> &Builder,
+ MinMaxRecurrenceKind RK,
+ Value *Left, Value *Right) {
CmpInst::Predicate P = CmpInst::ICMP_NE;
switch (RK) {
default:
- llvm_unreachable("Unknown min/max reduction kind");
- case ReductionInstDesc::MRK_UIntMin:
+ llvm_unreachable("Unknown min/max recurrence kind");
+ case MRK_UIntMin:
P = CmpInst::ICMP_ULT;
break;
- case ReductionInstDesc::MRK_UIntMax:
+ case MRK_UIntMax:
P = CmpInst::ICMP_UGT;
break;
- case ReductionInstDesc::MRK_SIntMin:
+ case MRK_SIntMin:
P = CmpInst::ICMP_SLT;
break;
- case ReductionInstDesc::MRK_SIntMax:
+ case MRK_SIntMax:
P = CmpInst::ICMP_SGT;
break;
- case ReductionInstDesc::MRK_FloatMin:
+ case MRK_FloatMin:
P = CmpInst::FCMP_OLT;
break;
- case ReductionInstDesc::MRK_FloatMax:
+ case MRK_FloatMax:
P = CmpInst::FCMP_OGT;
break;
}
Value *Cmp;
- if (RK == ReductionInstDesc::MRK_FloatMin ||
- RK == ReductionInstDesc::MRK_FloatMax)
+ if (RK == MRK_FloatMin || RK == MRK_FloatMax)
Cmp = Builder.CreateFCmp(P, Left, Right, "rdx.minmax.cmp");
else
Cmp = Builder.CreateICmp(P, Left, Right, "rdx.minmax.cmp");
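[Editor's note: the rename folds MinMaxReductionKind into RecurrenceDescriptor, but the mechanics are unchanged: each kind picks a compare predicate feeding a select, and getRecurrenceIdentity hands back the neutral element per kind. A compact scalar sketch of both helpers, integer kinds only, with hypothetical stand-in enums.]

#include <cassert>
#include <cstdint>

enum RecurrenceKind { RK_IntegerAdd, RK_IntegerMult, RK_IntegerAnd,
                      RK_IntegerOr, RK_IntegerXor };
enum MinMaxRecurrenceKind { MRK_UIntMin, MRK_UIntMax, MRK_SIntMin,
                            MRK_SIntMax };

// Identity element for each kind: x op identity == x.
int64_t getIdentity(RecurrenceKind K) {
  switch (K) {
  case RK_IntegerAdd:
  case RK_IntegerOr:
  case RK_IntegerXor:
    return 0;
  case RK_IntegerMult:
    return 1;
  case RK_IntegerAnd:
    return -1;  // all bits set
  }
  return 0;
}

// Scalar analogue of createMinMaxOp: the kind selects a predicate, and
// the result keeps the cmp+select shape the vectorizer emits.
int64_t createMinMax(MinMaxRecurrenceKind RK, int64_t L, int64_t R) {
  bool Cmp = false;
  switch (RK) {
  case MRK_UIntMin: Cmp = (uint64_t)L < (uint64_t)R; break;
  case MRK_UIntMax: Cmp = (uint64_t)L > (uint64_t)R; break;
  case MRK_SIntMin: Cmp = L < R; break;
  case MRK_SIntMax: Cmp = L > R; break;
  }
  return Cmp ? L : R;  // the "select" half of the pattern
}

int main() {
  assert((5 & getIdentity(RK_IntegerAnd)) == 5);
  assert(5 * getIdentity(RK_IntegerMult) == 5);
  assert(createMinMax(MRK_SIntMin, -1, 2) == -1);
  assert(createMinMax(MRK_UIntMin, -1, 2) == 2);  // -1 is huge unsigned
}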
diff --git a/lib/Transforms/Utils/LowerSwitch.cpp b/lib/Transforms/Utils/LowerSwitch.cpp
index e0e0e9009495..c1b0645c7cbc 100644
--- a/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/lib/Transforms/Utils/LowerSwitch.cpp
@@ -101,7 +101,7 @@ namespace {
return CI1->getValue().slt(CI2->getValue());
}
};
-}
+} // namespace
char LowerSwitch::ID = 0;
INITIALIZE_PASS(LowerSwitch, "lowerswitch",
@@ -364,9 +364,9 @@ unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) {
std::sort(Cases.begin(), Cases.end(), CaseCmp());
// Merge cases into clusters
- if (Cases.size()>=2)
- for (CaseItr I = Cases.begin(), J = std::next(Cases.begin());
- J != Cases.end();) {
+ if (Cases.size() >= 2) {
+ CaseItr I = Cases.begin();
+ for (CaseItr J = std::next(I), E = Cases.end(); J != E; ++J) {
int64_t nextValue = J->Low->getSExtValue();
int64_t currentValue = I->High->getSExtValue();
BasicBlock* nextBB = J->BB;
@@ -374,13 +374,16 @@ unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) {
// If the two neighboring cases go to the same destination, merge them
// into a single case.
- if ((nextValue-currentValue==1) && (currentBB == nextBB)) {
+ assert(nextValue > currentValue && "Cases should be strictly ascending");
+ if ((nextValue == currentValue + 1) && (currentBB == nextBB)) {
I->High = J->High;
- J = Cases.erase(J);
- } else {
- I = J++;
+ // FIXME: Combine branch weights.
+ } else if (++I != J) {
+ *I = *J;
}
}
+ Cases.erase(std::next(I), Cases.end());
+ }
for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
if (I->Low != I->High)
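[Editor's note: the rewritten Clusterify merges in place and erases the tail once, instead of calling Cases.erase() per merged element. A standalone sketch of that compaction over sorted, disjoint ranges; CaseRange here is a hypothetical POD, not the pass's type.]

#include <cassert>
#include <iterator>
#include <vector>

struct CaseRange { long Low, High; int BB; };  // BB: destination id

// In-place clustering of sorted, disjoint cases: adjacent ranges with
// contiguous values and the same destination are fused, and the dead
// tail is erased once at the end, like std::unique.
void clusterify(std::vector<CaseRange> &Cases) {
  if (Cases.size() < 2)
    return;
  auto I = Cases.begin();
  for (auto J = std::next(I), E = Cases.end(); J != E; ++J) {
    assert(J->Low > I->High && "Cases should be strictly ascending");
    if (J->Low == I->High + 1 && J->BB == I->BB)
      I->High = J->High;   // extend the current cluster
    else if (++I != J)
      *I = *J;             // compact leftward
  }
  Cases.erase(std::next(I), Cases.end());
}

int main() {
  std::vector<CaseRange> Cases = {{1, 1, 0}, {2, 2, 0}, {4, 4, 1}, {5, 5, 1}};
  clusterify(Cases);
  assert(Cases.size() == 2);
  assert(Cases[0].Low == 1 && Cases[0].High == 2);
  assert(Cases[1].Low == 4 && Cases[1].High == 5);
}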
@@ -476,12 +479,10 @@ void LowerSwitch::processSwitchInst(SwitchInst *SI) {
// cases.
assert(MaxPop > 0 && PopSucc);
Default = PopSucc;
- for (CaseItr I = Cases.begin(); I != Cases.end();) {
- if (I->BB == PopSucc)
- I = Cases.erase(I);
- else
- ++I;
- }
+ Cases.erase(std::remove_if(
+ Cases.begin(), Cases.end(),
+ [PopSucc](const CaseRange &R) { return R.BB == PopSucc; }),
+ Cases.end());
// If there are no cases left, just branch.
if (Cases.empty()) {
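[Editor's note: the per-element erase loop above becomes the erase-remove idiom: one linear pass plus one erase, instead of a quadratic series of mid-vector erases. A minimal sketch with destination ids standing in for case ranges.]

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  // Drop every case that already branches to the popular successor (id 7),
  // the single-pass equivalent of the old erase-in-a-loop version.
  std::vector<int> CaseDests = {7, 3, 7, 5, 7};
  const int PopSucc = 7;
  CaseDests.erase(std::remove_if(CaseDests.begin(), CaseDests.end(),
                                 [PopSucc](int BB) { return BB == PopSucc; }),
                  CaseDests.end());
  assert((CaseDests == std::vector<int>{3, 5}));
}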
diff --git a/lib/Transforms/Utils/MetaRenamer.cpp b/lib/Transforms/Utils/MetaRenamer.cpp
index 395a46bad97b..46dd65e9def6 100644
--- a/lib/Transforms/Utils/MetaRenamer.cpp
+++ b/lib/Transforms/Utils/MetaRenamer.cpp
@@ -131,7 +131,7 @@ namespace {
return true;
}
};
-}
+} // namespace
char MetaRenamer::ID = 0;
INITIALIZE_PASS(MetaRenamer, "metarenamer",
diff --git a/lib/Transforms/Utils/SSAUpdater.cpp b/lib/Transforms/Utils/SSAUpdater.cpp
index 88b39dd7f664..c09889875805 100644
--- a/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/lib/Transforms/Utils/SSAUpdater.cpp
@@ -303,7 +303,7 @@ public:
}
};
-} // End llvm namespace
+} // namespace llvm
/// Check to see if AvailableVals has an entry for the specified BB and if so,
/// return it. If not, construct SSA form by first calculating the required
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 60ac271bceb7..3d7ab0fd65a9 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -136,7 +136,7 @@ public:
: TTI(TTI), DL(DL), BonusInstThreshold(BonusInstThreshold), AC(AC) {}
bool run(BasicBlock *BB);
};
-}
+} // namespace
/// SafeToMergeTerminators - Return true if it is safe to merge these two
/// terminator instructions together.
@@ -502,7 +502,7 @@ private:
}
};
-}
+} // namespace
static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) {
Instruction *Cond = nullptr;
@@ -3717,7 +3717,7 @@ namespace {
// For ArrayKind, this is the array.
GlobalVariable *Array;
};
-}
+} // namespace
SwitchLookupTable::SwitchLookupTable(
Module &M, uint64_t TableSize, ConstantInt *Offset,
@@ -4058,7 +4058,7 @@ static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
return false;
// Figure out the corresponding result for each case value and phi node in the
- // common destination, as well as the the min and max case values.
+ // common destination, as well as the min and max case values.
assert(SI->case_begin() != SI->case_end());
SwitchInst::CaseIt CI = SI->case_begin();
ConstantInt *MinCaseVal = CI.getCaseValue();
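[Editor's note: this SimplifyCFG path turns a switch into a dense table indexed by (case value - MinCaseVal), with a bounds check guarding the default. A scalar sketch of the resulting lookup shape, with hypothetical table contents.]

#include <cassert>
#include <vector>

// Dense lookup-table form of a switch: index by (value - minCase) and
// fall back to the default when out of range.
int lookupSwitch(int V) {
  static const int MinCase = 10;
  static const std::vector<int> Table = {100, 101, 102};  // cases 10..12
  static const int Default = -1;
  unsigned Idx = unsigned(V - MinCase);   // wraps for V < MinCase
  return Idx < Table.size() ? Table[Idx] : Default;
}

int main() {
  assert(lookupSwitch(10) == 100);
  assert(lookupSwitch(12) == 102);
  assert(lookupSwitch(9) == -1 && lookupSwitch(42) == -1);
}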
diff --git a/lib/Transforms/Utils/SimplifyIndVar.cpp b/lib/Transforms/Utils/SimplifyIndVar.cpp
index ab30aa17c76b..68986ac0894f 100644
--- a/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -77,7 +77,7 @@ namespace {
Instruction *splitOverflowIntrinsic(Instruction *IVUser,
const DominatorTree *DT);
};
-}
+} // namespace
/// Fold an IV operand into its use. This removes increments of an
/// aligned IV when used by an instruction that ignores the low bits.
diff --git a/lib/Transforms/Utils/SimplifyInstructions.cpp b/lib/Transforms/Utils/SimplifyInstructions.cpp
index c499c87b1f0b..0a583a5af27a 100644
--- a/lib/Transforms/Utils/SimplifyInstructions.cpp
+++ b/lib/Transforms/Utils/SimplifyInstructions.cpp
@@ -100,7 +100,7 @@ namespace {
return Changed;
}
};
-}
+} // namespace
char InstSimplifier::ID = 0;
INITIALIZE_PASS_BEGIN(InstSimplifier, "instsimplify",
diff --git a/lib/Transforms/Utils/SymbolRewriter.cpp b/lib/Transforms/Utils/SymbolRewriter.cpp
index a2a54da8590c..4cc278fe7278 100644
--- a/lib/Transforms/Utils/SymbolRewriter.cpp
+++ b/lib/Transforms/Utils/SymbolRewriter.cpp
@@ -538,7 +538,7 @@ void RewriteSymbols::loadAndParseMapFiles() {
for (const auto &MapFile : MapFiles)
parser.parse(MapFile, &Descriptors);
}
-}
+} // namespace
INITIALIZE_PASS(RewriteSymbols, "rewrite-symbols", "Rewrite Symbols", false,
false)
diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp
index 215d6f9a1eb6..fd7661ffd41f 100644
--- a/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -3192,7 +3192,7 @@ namespace {
DEBUG(dbgs() << "BBV: final: \n" << BB << "\n");
}
-}
+} // namespace
char BBVectorize::ID = 0;
static const char bb_vectorize_name[] = "Basic-Block Vectorization";
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 95c9381985ab..b7faa204927d 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -872,7 +872,7 @@ public:
/// ReductionList contains the reduction descriptors for all
/// of the reductions that were found in the loop.
- typedef DenseMap<PHINode*, ReductionDescriptor> ReductionList;
+ typedef DenseMap<PHINode *, RecurrenceDescriptor> ReductionList;
/// InductionList saves induction variables and maps them to the
/// induction descriptor.
@@ -2906,7 +2906,7 @@ struct CSEDenseMapInfo {
return LHS->isIdenticalTo(RHS);
}
};
-}
+} // namespace
/// \brief Check whether this block is a predicated block.
/// Due to if predication of stores we might create a sequence of "if(pred) a[i]
@@ -3093,13 +3093,13 @@ void InnerLoopVectorizer::vectorizeLoop() {
// Find the reduction variable descriptor.
assert(Legal->getReductionVars()->count(RdxPhi) &&
"Unable to find the reduction variable");
- ReductionDescriptor RdxDesc = (*Legal->getReductionVars())[RdxPhi];
+ RecurrenceDescriptor RdxDesc = (*Legal->getReductionVars())[RdxPhi];
- ReductionDescriptor::ReductionKind RK = RdxDesc.getReductionKind();
- TrackingVH<Value> ReductionStartValue = RdxDesc.getReductionStartValue();
+ RecurrenceDescriptor::RecurrenceKind RK = RdxDesc.getRecurrenceKind();
+ TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
- ReductionInstDesc::MinMaxReductionKind MinMaxKind =
- RdxDesc.getMinMaxReductionKind();
+ RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind =
+ RdxDesc.getMinMaxRecurrenceKind();
setDebugLocFromInst(Builder, ReductionStartValue);
// We need to generate a reduction vector from the incoming scalar.
@@ -3116,8 +3116,8 @@ void InnerLoopVectorizer::vectorizeLoop() {
// one for multiplication, -1 for And.
Value *Identity;
Value *VectorStart;
- if (RK == ReductionDescriptor::RK_IntegerMinMax ||
- RK == ReductionDescriptor::RK_FloatMinMax) {
+ if (RK == RecurrenceDescriptor::RK_IntegerMinMax ||
+ RK == RecurrenceDescriptor::RK_FloatMinMax) {
// MinMax reductions have the start value as their identity.
if (VF == 1) {
VectorStart = Identity = ReductionStartValue;
@@ -3127,8 +3127,8 @@ void InnerLoopVectorizer::vectorizeLoop() {
}
} else {
// Handle other reduction kinds:
- Constant *Iden =
- ReductionDescriptor::getReductionIdentity(RK, VecTy->getScalarType());
+ Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity(
+ RK, VecTy->getScalarType());
if (VF == 1) {
Identity = Iden;
// This vector is the Identity vector where the first element is the
@@ -3185,7 +3185,7 @@ void InnerLoopVectorizer::vectorizeLoop() {
// Reduce all of the unrolled parts into a single vector.
Value *ReducedPartRdx = RdxParts[0];
- unsigned Op = ReductionDescriptor::getReductionBinOp(RK);
+ unsigned Op = RecurrenceDescriptor::getRecurrenceBinOp(RK);
setDebugLocFromInst(Builder, ReducedPartRdx);
for (unsigned part = 1; part < UF; ++part) {
if (Op != Instruction::ICmp && Op != Instruction::FCmp)
@@ -3194,7 +3194,7 @@ void InnerLoopVectorizer::vectorizeLoop() {
Builder.CreateBinOp((Instruction::BinaryOps)Op, RdxParts[part],
ReducedPartRdx, "bin.rdx"));
else
- ReducedPartRdx = ReductionDescriptor::createMinMaxOp(
+ ReducedPartRdx = RecurrenceDescriptor::createMinMaxOp(
Builder, MinMaxKind, ReducedPartRdx, RdxParts[part]);
}
@@ -3226,8 +3226,8 @@ void InnerLoopVectorizer::vectorizeLoop() {
TmpVec = addFastMathFlag(Builder.CreateBinOp(
(Instruction::BinaryOps)Op, TmpVec, Shuf, "bin.rdx"));
else
- TmpVec = ReductionDescriptor::createMinMaxOp(Builder, MinMaxKind,
- TmpVec, Shuf);
+ TmpVec = RecurrenceDescriptor::createMinMaxOp(Builder, MinMaxKind,
+ TmpVec, Shuf);
}
// The result is in the first element of the vector.
@@ -4040,8 +4040,8 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
continue;
}
- if (ReductionDescriptor::isReductionPHI(Phi, TheLoop,
- Reductions[Phi])) {
+ if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop,
+ Reductions[Phi])) {
AllowedExit.insert(Reductions[Phi].getLoopExitInstr());
continue;
}
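[Editor's note: the epilogue first folds the UF unrolled parts pairwise, then reduces the final vector by repeatedly shuffling the upper half onto the lower half, log2(VF) "bin.rdx" rounds. A scalar sketch of that halving tree for an add reduction.]

#include <cassert>
#include <cstddef>
#include <vector>

// Tree reduction over a power-of-two-sized "vector", the scalar analogue
// of the shuffle-and-binop sequence the vectorizer emits: each round
// folds the upper half onto the lower half.
int reduceAdd(std::vector<int> V) {
  assert(!V.empty() && (V.size() & (V.size() - 1)) == 0 && "need 2^N lanes");
  for (std::size_t Half = V.size() / 2; Half >= 1; Half /= 2)
    for (std::size_t i = 0; i < Half; ++i)
      V[i] += V[i + Half];   // one "bin.rdx" step
  return V[0];               // the result lives in lane 0
}

int main() {
  assert(reduceAdd({1, 2, 3, 4}) == 10);
  assert(reduceAdd({5}) == 5);
}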
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index a3a45c80d850..370e2956ac4f 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -315,12 +315,12 @@ static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
}
/// \returns the AA location that is being accessed by the instruction.
-static AliasAnalysis::Location getLocation(Instruction *I, AliasAnalysis *AA) {
+static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return MemoryLocation::get(SI);
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return MemoryLocation::get(LI);
- return AliasAnalysis::Location();
+ return MemoryLocation();
}
/// \returns True if the instruction is not a volatile or atomic load/store.
@@ -515,7 +515,7 @@ private:
///
/// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
/// is invariant in the calling loop.
- bool isAliased(const AliasAnalysis::Location &Loc1, Instruction *Inst1,
+ bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
Instruction *Inst2) {
// First check if the result is already in the cache.
@@ -524,7 +524,7 @@ private:
if (result.hasValue()) {
return result.getValue();
}
- AliasAnalysis::Location Loc2 = getLocation(Inst2, AA);
+ MemoryLocation Loc2 = getLocation(Inst2, AA);
bool aliased = true;
if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
// Do the alias check.
@@ -1637,8 +1637,10 @@ bool BoUpSLP::isFullyVectorizableTinyTree() {
if (VectorizableTree.size() != 2)
return false;
- // Handle splat stores.
- if (!VectorizableTree[0].NeedToGather && isSplat(VectorizableTree[1].Scalars))
+ // Handle splat and all-constant stores.
+ if (!VectorizableTree[0].NeedToGather &&
+ (allConstant(VectorizableTree[1].Scalars) ||
+ isSplat(VectorizableTree[1].Scalars)))
return true;
// Gathering cost would be too much for tiny trees.
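[Editor's note: isFullyVectorizableTinyTree now also accepts a bundle whose scalars are all constants, not just splats. Minimal stand-ins for the two predicates, with constants modelled by an IsConst flag; an assumption of this sketch, not the LLVM representation.]

#include <cassert>
#include <vector>

struct Scalar { int Val; bool IsConst; };

bool isSplat(const std::vector<Scalar> &VL) {
  for (const Scalar &S : VL)
    if (S.Val != VL[0].Val)
      return false;    // any mismatch breaks the splat
  return true;
}

bool allConstant(const std::vector<Scalar> &VL) {
  for (const Scalar &S : VL)
    if (!S.IsConst)
      return false;    // one non-constant disqualifies the bundle
  return true;
}

int main() {
  std::vector<Scalar> Splat = {{4, false}, {4, false}};
  std::vector<Scalar> Consts = {{1, true}, {2, true}};
  assert(isSplat(Splat) && !allConstant(Splat));
  assert(!isSplat(Consts) && allConstant(Consts));
}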
@@ -2903,7 +2905,7 @@ void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
ScheduleData *DepDest = BundleMember->NextLoadStore;
if (DepDest) {
Instruction *SrcInst = BundleMember->Inst;
- AliasAnalysis::Location SrcLoc = getLocation(SrcInst, SLP->AA);
+ MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
unsigned numAliased = 0;
unsigned DistToSrc = 1;
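[Editor's note: isAliased consults a per-pair cache before issuing the comparatively expensive AA query, as the result.hasValue() check above shows. A standalone sketch of that memoization shape, with ints standing in for instructions and a hypothetical expensiveQuery in place of the real AA call.]

#include <cassert>
#include <map>
#include <utility>

// Memoized pairwise query: compute once per (A, B) pair, then answer
// subsequent lookups from the map.
struct AliasCache {
  std::map<std::pair<int, int>, bool> Cache;
  int Misses = 0;

  bool isAliased(int A, int B) {
    auto Key = std::make_pair(A, B);
    auto It = Cache.find(Key);
    if (It != Cache.end())
      return It->second;   // cache hit: no recomputation
    ++Misses;
    bool Aliased = expensiveQuery(A, B);
    Cache[Key] = Aliased;
    return Aliased;
  }

  // Stand-in for the real AA->alias(Loc1, Loc2) call.
  static bool expensiveQuery(int A, int B) { return A == B; }
};

int main() {
  AliasCache C;
  assert(C.isAliased(1, 1) && C.Misses == 1);
  assert(C.isAliased(1, 1) && C.Misses == 1);  // served from the cache
  assert(!C.isAliased(1, 2) && C.Misses == 2);
}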